author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-06 17:22:09 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-06 17:22:09 -0800
commit | 9753dfe19a85e7e45a34a56f4cb2048bb4f50e27 (patch)
tree | c017a1b4a70b8447c71b01d8b320e071546b5c9d /drivers
parent | edf7c8148ec40c0fd27c0ef3f688defcc65e3913 (diff)
parent | 9f42f126154786e6e76df513004800c8c633f020 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1958 commits)
net: pack skb_shared_info more efficiently
net_sched: red: split red_parms into parms and vars
net_sched: sfq: extend limits
cnic: Improve error recovery on bnx2x devices
cnic: Re-init dev->stats_addr after chip reset
net_sched: Bug in netem reordering
bna: fix sparse warnings/errors
bna: make ethtool_ops and strings const
xgmac: cleanups
net: make ethtool_ops const
vmxnet3" make ethtool ops const
xen-netback: make ops structs const
virtio_net: Pass gfp flags when allocating rx buffers.
ixgbe: FCoE: Add support for ndo_get_fcoe_hbainfo() call
netdev: FCoE: Add new ndo_get_fcoe_hbainfo() call
igb: reset PHY after recovering from PHY power down
igb: add basic runtime PM support
igb: Add support for byte queue limits.
e1000: cleanup CE4100 MDIO registers access
e1000: unmap ce4100_gbe_mdio_base_virt in e1000_remove
...
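Several entries in the shortlog above (bna, vmxnet3, xen-netback, and the core "net: make ethtool_ops const" change) apply the same constification pattern to driver ops tables. The following is a minimal sketch of that pattern for a hypothetical driver "foo"; the names are illustrative and not taken from this merge.

```c
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical driver "foo": the ops table never changes at run time,
 * so it can be declared const and placed in read-only data. */
static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "foo", sizeof(info->driver));
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_drvinfo	= foo_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static void foo_setup(struct net_device *dev)
{
	/* dev->ethtool_ops takes a pointer to const after the core change,
	 * so assigning the const table needs no cast. */
	dev->ethtool_ops = &foo_ethtool_ops;
}
```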
Diffstat (limited to 'drivers')
799 files changed, 87764 insertions, 63304 deletions
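One of the shortlog items above is "igb: Add support for byte queue limits." The sketch below shows the generic BQL accounting hooks a driver wires into its TX path; it is not taken from the igb patch in this merge, and the single-queue "foo" driver names are hypothetical.

```c
#include <linux/netdevice.h>

/* Hypothetical single-queue driver "foo" wiring up byte queue limits:
 * report bytes as they are queued to hardware and again as completions
 * come back, so the stack can keep the TX ring from over-buffering. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... place skb on the hardware TX ring here ... */

	netdev_tx_sent_queue(txq, skb->len);	/* bytes handed to HW */
	return NETDEV_TX_OK;
}

static void foo_clean_tx_irq(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* bytes/packets the hardware has finished transmitting */
	netdev_tx_completed_queue(txq, pkts, bytes);
}
```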
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 3d0c2b0fed9..9e373ba2030 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev) if (ia_vcc == NULL) { atomic_inc(&vcc->stats->rx_err); + atm_return(vcc, skb->truesize); dev_kfree_skb_any(skb); - atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; } // get real pkt length pwang_test @@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev) atomic_inc(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) + atm_return(vcc, skb->truesize); dev_kfree_skb_any(skb); - atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; } skb_trim(skb, length); diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 30a3085d335..fda56bde36b 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -18,6 +18,9 @@ void bcma_bus_unregister(struct bcma_bus *bus); int __init bcma_bus_early_register(struct bcma_bus *bus, struct bcma_device *core_cc, struct bcma_device *core_mips); +#ifdef CONFIG_PM +int bcma_bus_resume(struct bcma_bus *bus); +#endif /* scan.c */ int bcma_bus_scan(struct bcma_bus *bus); diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 1b51d8b7ac8..443b83a2fd7 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core) pr_debug("Switched to core: 0x%X\n", core->id.id); } -static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) +/* Provides access to the requested core. Returns base offset that has to be + * used. It makes use of fixed windows when possible. */ +static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core) { + switch (core->id.id) { + case BCMA_CORE_CHIPCOMMON: + return 3 * BCMA_CORE_SIZE; + case BCMA_CORE_PCIE: + return 2 * BCMA_CORE_SIZE; + } + if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); + return 0; +} + +static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) +{ + offset += bcma_host_pci_provide_access_to_core(core); return ioread8(core->bus->mmio + offset); } static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset) { - if (core->bus->mapped_core != core) - bcma_host_pci_switch_core(core); + offset += bcma_host_pci_provide_access_to_core(core); return ioread16(core->bus->mmio + offset); } static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset) { - if (core->bus->mapped_core != core) - bcma_host_pci_switch_core(core); + offset += bcma_host_pci_provide_access_to_core(core); return ioread32(core->bus->mmio + offset); } static void bcma_host_pci_write8(struct bcma_device *core, u16 offset, u8 value) { - if (core->bus->mapped_core != core) - bcma_host_pci_switch_core(core); + offset += bcma_host_pci_provide_access_to_core(core); iowrite8(value, core->bus->mmio + offset); } static void bcma_host_pci_write16(struct bcma_device *core, u16 offset, u16 value) { - if (core->bus->mapped_core != core) - bcma_host_pci_switch_core(core); + offset += bcma_host_pci_provide_access_to_core(core); iowrite16(value, core->bus->mmio + offset); } static void bcma_host_pci_write32(struct bcma_device *core, u16 offset, u32 value) { - if (core->bus->mapped_core != core) - bcma_host_pci_switch_core(core); + offset += bcma_host_pci_provide_access_to_core(core); iowrite32(value, core->bus->mmio + offset); } @@ -224,6 +234,41 @@ static void bcma_host_pci_remove(struct 
pci_dev *dev) pci_set_drvdata(dev, NULL); } +#ifdef CONFIG_PM +static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state) +{ + /* Host specific */ + pci_save_state(dev); + pci_disable_device(dev); + pci_set_power_state(dev, pci_choose_state(dev, state)); + + return 0; +} + +static int bcma_host_pci_resume(struct pci_dev *dev) +{ + struct bcma_bus *bus = pci_get_drvdata(dev); + int err; + + /* Host specific */ + pci_set_power_state(dev, 0); + err = pci_enable_device(dev); + if (err) + return err; + pci_restore_state(dev); + + /* Bus specific */ + err = bcma_bus_resume(bus); + if (err) + return err; + + return 0; +} +#else /* CONFIG_PM */ +# define bcma_host_pci_suspend NULL +# define bcma_host_pci_resume NULL +#endif /* CONFIG_PM */ + static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, @@ -239,6 +284,8 @@ static struct pci_driver bcma_pci_bridge_driver = { .id_table = bcma_pci_bridge_tbl, .probe = bcma_host_pci_probe, .remove = bcma_host_pci_remove, + .suspend = bcma_host_pci_suspend, + .resume = bcma_host_pci_resume, }; int __init bcma_host_pci_init(void) diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 70c84b95109..10f92b371e5 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -240,6 +240,22 @@ int __init bcma_bus_early_register(struct bcma_bus *bus, return 0; } +#ifdef CONFIG_PM +int bcma_bus_resume(struct bcma_bus *bus) +{ + struct bcma_device *core; + + /* Init CC core */ + core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); + if (core) { + bus->drv_cc.setup_done = false; + bcma_core_chipcommon_init(&bus->drv_cc); + } + + return 0; +} +#endif + int __bcma_driver_register(struct bcma_driver *drv, struct module *owner) { drv->drv.name = drv->name; diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index d7292390d23..6f230fb087c 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) u16 v; int i; + bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & + SSB_SPROM_REVISION_REV; + for (i = 0; i < 3; i++) { v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i]; *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v); @@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)]; + bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] & + SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT; + bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] & + SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT; + bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] & + SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT; + bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] & + SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT; + + bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] & + SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT; + bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] & + SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT; + bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] & + SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT; + bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] & + SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT; + + bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] & + SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT; + bus->sprom.txpid5g[1] = 
(sprom[SPOFF(SSB_SPROM4_TXPID5G01)] & + SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT; + bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] & + SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT; + bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] & + SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT; + + bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] & + SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT; + bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] & + SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT; + bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] & + SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT; + bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] & + SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT; + bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)]; bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)]; bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)]; bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)]; bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)]; + + bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & + SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT; + bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & + SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT; + bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & + SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT; + bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & + SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT; + bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & + SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT; + + bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & + SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT; + bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & + SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT; + bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & + SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT; + bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & + SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT; + bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & + SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT; } int bcma_sprom_get(struct bcma_bus *bus) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 106beb194f3..1622772f802 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -30,6 +30,7 @@ #include <net/bluetooth/bluetooth.h> #define VERSION "1.0" +#define ATH3K_FIRMWARE "ath3k-1.fw" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 @@ -400,9 +401,15 @@ static int ath3k_probe(struct usb_interface *intf, return 0; } - if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { - BT_ERR("Error loading firmware"); - return -EIO; + ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev); + if (ret < 0) { + if (ret == -ENOENT) + BT_ERR("Firmware file \"%s\" not found", + ATH3K_FIRMWARE); + else + BT_ERR("Firmware file \"%s\" request failed (err=%d)", + ATH3K_FIRMWARE, ret); + return ret; } ret = ath3k_load_firmware(udev, firmware); @@ -441,4 +448,4 @@ MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); -MODULE_FIRMWARE("ath3k-1.fw"); +MODULE_FIRMWARE(ATH3K_FIRMWARE); diff --git a/drivers/bluetooth/bfusb.c 
b/drivers/bluetooth/bfusb.c index 61b591470a9..a936763b8c3 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -751,9 +751,7 @@ static void bfusb_disconnect(struct usb_interface *intf) bfusb_close(hdev); - if (hci_unregister_dev(hdev) < 0) - BT_ERR("Can't unregister HCI device %s", hdev->name); - + hci_unregister_dev(hdev); hci_free_dev(hdev); } diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index aed1904ea67..c6a0c610374 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -844,9 +844,7 @@ static int bluecard_close(bluecard_info_t *info) /* Turn FPGA off */ outb(0x80, iobase + 0x30); - if (hci_unregister_dev(hdev) < 0) - BT_ERR("Can't unregister HCI device %s", hdev->name); - + hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 4fc01949d39..0c97e5d514b 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -636,9 +636,7 @@ static int bt3c_close(bt3c_info_t *info) bt3c_hci_close(hdev); - if (hci_unregister_dev(hdev) < 0) - BT_ERR("Can't unregister HCI device %s", hdev->name); - + hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index 526b61807d9..200b3a2877d 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -565,9 +565,7 @@ static int btuart_close(btuart_info_t *info) spin_unlock_irqrestore(&(info->lock), flags); - if (hci_unregister_dev(hdev) < 0) - BT_ERR("Can't unregister HCI device %s", hdev->name); - + hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index eabc437ce50..fbfba802a3d 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -101,6 +101,7 @@ static struct usb_device_id btusb_table[] = { { USB_DEVICE(0x0c10, 0x0000) }, /* Broadcom BCM20702A0 */ + { USB_DEVICE(0x0a5c, 0x21e3) }, { USB_DEVICE(0x413c, 0x8197) }, { } /* Terminating entry */ @@ -315,7 +316,8 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) err = usb_submit_urb(urb, mem_flags); if (err < 0) { - BT_ERR("%s urb %p submission failed (%d)", + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -400,7 +402,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) err = usb_submit_urb(urb, mem_flags); if (err < 0) { - BT_ERR("%s urb %p submission failed (%d)", + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -506,15 +509,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); - urb->dev = data->udev; - urb->pipe = pipe; - urb->context = hdev; - urb->complete = btusb_isoc_complete; - urb->interval = data->isoc_rx_ep->bInterval; + usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, + hdev, data->isoc_rx_ep->bInterval); urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; - urb->transfer_buffer = buf; - urb->transfer_buffer_length = size; __fill_isoc_descriptor(urb, size, le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); @@ -523,7 +521,8 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) err = usb_submit_urb(urb, mem_flags); if (err < 0) { - BT_ERR("%s urb %p submission failed (%d)", + if 
(err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -727,6 +726,9 @@ static int btusb_send_frame(struct sk_buff *skb) usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb); + if (skb->priority >= HCI_PRIO_MAX - 1) + urb->transfer_flags = URB_ISO_ASAP; + hdev->stat.acl_tx++; break; @@ -770,7 +772,9 @@ skip_waking: err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { - BT_ERR("%s urb %p submission failed", hdev->name, urb); + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index 5e4c2de9fc3..969bb22e493 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -551,9 +551,7 @@ static int dtl1_close(dtl1_info_t *info) spin_unlock_irqrestore(&(info->lock), flags); - if (hci_unregister_dev(hdev) < 0) - BT_ERR("Can't unregister HCI device %s", hdev->name); - + hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 67c180c2c1e..2ed6ab1c6e1 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -41,6 +41,8 @@ #define VERSION "1.3" +static bool amp; + struct vhci_data { struct hci_dev *hdev; @@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file) hdev->bus = HCI_VIRTUAL; hdev->driver_data = data; + if (amp) + hdev->dev_type = HCI_AMP; + hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; @@ -264,10 +269,7 @@ static int vhci_release(struct inode *inode, struct file *file) struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; - if (hci_unregister_dev(hdev) < 0) { - BT_ERR("Can't unregister HCI device %s", hdev->name); - } - + hci_unregister_dev(hdev); hci_free_dev(hdev); file->private_data = NULL; @@ -306,6 +308,9 @@ static void __exit vhci_exit(void) module_init(vhci_init); module_exit(vhci_exit); +module_param(amp, bool, 0644); +MODULE_PARM_DESC(amp, "Create AMP controller device"); + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); MODULE_VERSION(VERSION); diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c index eb0e2ccc79a..73d45315940 100644 --- a/drivers/ieee802154/fakehard.c +++ b/drivers/ieee802154/fakehard.c @@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); - dev->features = NETIF_F_NO_CSUM; + dev->features = NETIF_F_HW_CSUM; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 127; dev->tx_queue_len = 10; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index e9cf51b1343..1612cfd50f3 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req) mutex_unlock(&lock); } +static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr) +{ + struct neighbour *n; + int ret; + + rcu_read_lock(); + n = dst_get_neighbour_noref(dst); + if (!n || !(n->nud_state & NUD_VALID)) { + if (n) + neigh_event_send(n, NULL); + ret = -ENODATA; + } else { + ret = rdma_copy_addr(addr, dst->dev, n->ha); + } + rcu_read_unlock(); + + return ret; +} + static int addr4_resolve(struct sockaddr_in 
*src_in, struct sockaddr_in *dst_in, struct rdma_dev_addr *addr) @@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in, __be32 src_ip = src_in->sin_addr.s_addr; __be32 dst_ip = dst_in->sin_addr.s_addr; struct rtable *rt; - struct neighbour *neigh; struct flowi4 fl4; int ret; @@ -214,20 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in, goto put; } - neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); - if (!neigh || !(neigh->nud_state & NUD_VALID)) { - rcu_read_lock(); - neigh_event_send(dst_get_neighbour(&rt->dst), NULL); - rcu_read_unlock(); - ret = -ENODATA; - if (neigh) - goto release; - goto put; - } - - ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); -release: - neigh_release(neigh); + ret = dst_fetch_ha(&rt->dst, addr); put: ip_rt_put(rt); out: @@ -240,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, struct rdma_dev_addr *addr) { struct flowi6 fl6; - struct neighbour *neigh; struct dst_entry *dst; int ret; memset(&fl6, 0, sizeof fl6); - ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr); - ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr); + fl6.daddr = dst_in->sin6_addr; + fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; dst = ip6_route_output(&init_net, NULL, &fl6); @@ -260,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, goto put; src_in->sin6_family = AF_INET6; - ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr); + src_in->sin6_addr = fl6.saddr; } if (dst->dev->flags & IFF_LOOPBACK) { @@ -276,16 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, goto put; } - rcu_read_lock(); - neigh = dst_get_neighbour(dst); - if (!neigh || !(neigh->nud_state & NUD_VALID)) { - if (neigh) - neigh_event_send(neigh, NULL); - ret = -ENODATA; - } else { - ret = rdma_copy_addr(addr, dst->dev, neigh->ha); - } - rcu_read_unlock(); + ret = dst_fetch_ha(dst, addr); put: dst_release(dst); return ret; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d0d4aa9f480..236a88c1ca8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) if (cma_zero_addr(src)) { dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr; if ((src->sa_family = dst->sa_family) == AF_INET) { - ((struct sockaddr_in *) src)->sin_addr.s_addr = - ((struct sockaddr_in *) dst)->sin_addr.s_addr; + ((struct sockaddr_in *)src)->sin_addr = + ((struct sockaddr_in *)dst)->sin_addr; } else { - ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr, - &((struct sockaddr_in6 *) dst)->sin6_addr); + ((struct sockaddr_in6 *)src)->sin6_addr = + ((struct sockaddr_in6 *)dst)->sin6_addr; } } diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index c88b12beef2..740dcc065cf 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) struct iwch_ep *child_ep, *parent_ep = ctx; struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int hwtid = GET_TID(req); - struct neighbour *neigh; struct dst_entry *dst; struct l2t_entry *l2t; struct rtable *rt; @@ -1375,10 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto reject; } dst = &rt->dst; - rcu_read_lock(); - neigh = dst_get_neighbour(dst); - l2t = t3_l2t_get(tdev, neigh, neigh->dev); - rcu_read_unlock(); + l2t = t3_l2t_get(tdev, dst, NULL); if (!l2t) 
{ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1889,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id) int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct iwch_dev *h = to_iwch_dev(cm_id->device); - struct neighbour *neigh; struct iwch_ep *ep; struct rtable *rt; int err = 0; @@ -1947,13 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) goto fail3; } ep->dst = &rt->dst; - - rcu_read_lock(); - neigh = dst_get_neighbour(ep->dst); - - /* get a l2t entry */ - ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); - rcu_read_unlock(); + ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 0747004313a..0668bb3472d 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1556,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req, return; } +static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, + struct c4iw_dev *cdev, bool clear_mpa_v1) +{ + struct neighbour *n; + int err, step; + + rcu_read_lock(); + n = dst_get_neighbour_noref(dst); + err = -ENODEV; + if (!n) + goto out; + err = -ENOMEM; + if (n->dev->flags & IFF_LOOPBACK) { + struct net_device *pdev; + + pdev = ip_dev_find(&init_net, peer_ip); + ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, + n, pdev, 0); + if (!ep->l2t) + goto out; + ep->mtu = pdev->mtu; + ep->tx_chan = cxgb4_port_chan(pdev); + ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; + step = cdev->rdev.lldi.ntxq / + cdev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(pdev) * step; + step = cdev->rdev.lldi.nrxq / + cdev->rdev.lldi.nchan; + ep->ctrlq_idx = cxgb4_port_idx(pdev); + ep->rss_qid = cdev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(pdev) * step]; + dev_put(pdev); + } else { + ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, + n, n->dev, 0); + if (!ep->l2t) + goto out; + ep->mtu = dst_mtu(ep->dst); + ep->tx_chan = cxgb4_port_chan(n->dev); + ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; + step = cdev->rdev.lldi.ntxq / + cdev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(n->dev) * step; + ep->ctrlq_idx = cxgb4_port_idx(n->dev); + step = cdev->rdev.lldi.nrxq / + cdev->rdev.lldi.nchan; + ep->rss_qid = cdev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(n->dev) * step]; + + if (clear_mpa_v1) { + ep->retry_with_mpa_v1 = 0; + ep->tried_with_mpa_v1 = 0; + } + } + err = 0; +out: + rcu_read_unlock(); + + return err; +} + static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *child_ep, *parent_ep; @@ -1563,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); struct tid_info *t = dev->rdev.lldi.tids; unsigned int hwtid = GET_TID(req); - struct neighbour *neigh; struct dst_entry *dst; - struct l2t_entry *l2t; struct rtable *rt; __be32 local_ip, peer_ip; __be16 local_port, peer_port; - struct net_device *pdev; - u32 tx_chan, smac_idx; - u16 rss_qid; - u32 mtu; - int step; - int txq_idx, ctrlq_idx; + int err; parent_ep = lookup_stid(t, stid); PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); @@ -1596,49 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) goto reject; } dst = &rt->dst; - rcu_read_lock(); - neigh = dst_get_neighbour(dst); - if (neigh->dev->flags & IFF_LOOPBACK) { 
- pdev = ip_dev_find(&init_net, peer_ip); - BUG_ON(!pdev); - l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0); - mtu = pdev->mtu; - tx_chan = cxgb4_port_chan(pdev); - smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; - step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; - txq_idx = cxgb4_port_idx(pdev) * step; - ctrlq_idx = cxgb4_port_idx(pdev); - step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; - rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; - dev_put(pdev); - } else { - l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0); - mtu = dst_mtu(dst); - tx_chan = cxgb4_port_chan(neigh->dev); - smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; - step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; - txq_idx = cxgb4_port_idx(neigh->dev) * step; - ctrlq_idx = cxgb4_port_idx(neigh->dev); - step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; - rss_qid = dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(neigh->dev) * step]; - } - rcu_read_unlock(); - if (!l2t) { - printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", + + child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); + if (!child_ep) { + printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", __func__); dst_release(dst); goto reject; } - child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); - if (!child_ep) { - printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", + err = import_ep(child_ep, peer_ip, dst, dev, false); + if (err) { + printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); - cxgb4_l2t_release(l2t); dst_release(dst); + kfree(child_ep); goto reject; } + state_set(&child_ep->com, CONNECTING); child_ep->com.dev = dev; child_ep->com.cm_id = NULL; @@ -1651,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); - child_ep->l2t = l2t; child_ep->dst = dst; child_ep->hwtid = hwtid; - child_ep->tx_chan = tx_chan; - child_ep->smac_idx = smac_idx; - child_ep->rss_qid = rss_qid; - child_ep->mtu = mtu; - child_ep->txq_idx = txq_idx; - child_ep->ctrlq_idx = ctrlq_idx; PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, - tx_chan, smac_idx, rss_qid); + child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); init_timer(&child_ep->timer); cxgb4_insert_tid(t, child_ep, hwtid); @@ -1792,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status) static int c4iw_reconnect(struct c4iw_ep *ep) { - int err = 0; struct rtable *rt; - struct net_device *pdev; - struct neighbour *neigh; - int step; + int err = 0; PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); init_timer(&ep->timer); @@ -1824,47 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep) } ep->dst = &rt->dst; - rcu_read_lock(); - neigh = dst_get_neighbour(ep->dst); - - /* get a l2t entry */ - if (neigh->dev->flags & IFF_LOOPBACK) { - PDBG("%s LOOPBACK\n", __func__); - pdev = ip_dev_find(&init_net, - ep->com.cm_id->remote_addr.sin_addr.s_addr); - ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, - neigh, pdev, 0); - ep->mtu = pdev->mtu; - ep->tx_chan = cxgb4_port_chan(pdev); - ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; - step = ep->com.dev->rdev.lldi.ntxq / - ep->com.dev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(pdev) * step; - step = ep->com.dev->rdev.lldi.nrxq / - ep->com.dev->rdev.lldi.nchan; - ep->ctrlq_idx = cxgb4_port_idx(pdev); - ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(pdev) * step]; - dev_put(pdev); - } else { - ep->l2t = 
cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, - neigh, neigh->dev, 0); - ep->mtu = dst_mtu(ep->dst); - ep->tx_chan = cxgb4_port_chan(neigh->dev); - ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; - step = ep->com.dev->rdev.lldi.ntxq / - ep->com.dev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; - ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); - step = ep->com.dev->rdev.lldi.nrxq / - ep->com.dev->rdev.lldi.nchan; - ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(neigh->dev) * step]; - } - rcu_read_unlock(); - if (!ep->l2t) { + err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, + ep->dst, ep->com.dev, false); + if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); - err = -ENOMEM; goto fail4; } @@ -2240,13 +2222,10 @@ err: int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { - int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_ep *ep; struct rtable *rt; - struct net_device *pdev; - struct neighbour *neigh; - int step; + int err = 0; if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { @@ -2307,49 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } ep->dst = &rt->dst; - rcu_read_lock(); - neigh = dst_get_neighbour(ep->dst); - - /* get a l2t entry */ - if (neigh->dev->flags & IFF_LOOPBACK) { - PDBG("%s LOOPBACK\n", __func__); - pdev = ip_dev_find(&init_net, - cm_id->remote_addr.sin_addr.s_addr); - ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, - neigh, pdev, 0); - ep->mtu = pdev->mtu; - ep->tx_chan = cxgb4_port_chan(pdev); - ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; - step = ep->com.dev->rdev.lldi.ntxq / - ep->com.dev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(pdev) * step; - step = ep->com.dev->rdev.lldi.nrxq / - ep->com.dev->rdev.lldi.nchan; - ep->ctrlq_idx = cxgb4_port_idx(pdev); - ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(pdev) * step]; - dev_put(pdev); - } else { - ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, - neigh, neigh->dev, 0); - ep->mtu = dst_mtu(ep->dst); - ep->tx_chan = cxgb4_port_chan(neigh->dev); - ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; - step = ep->com.dev->rdev.lldi.ntxq / - ep->com.dev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; - ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); - step = ep->com.dev->rdev.lldi.nrxq / - ep->com.dev->rdev.lldi.nchan; - ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(neigh->dev) * step]; - ep->retry_with_mpa_v1 = 0; - ep->tried_with_mpa_v1 = 0; - } - rcu_read_unlock(); - if (!ep->l2t) { + err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, + ep->dst, ep->com.dev, true); + if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); - err = -ENOMEM; goto fail4; } diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index f36da994a85..95c94d8f025 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey, err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier, op_modifier, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); if (!err) memcpy(response_mad, outmailbox->buf, 256); @@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_FAILURE; err = 
mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0, - MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C); + MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_WRAPPED); if (err) err = IB_MAD_RESULT_FAILURE; else { diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 18836cdf1e1..7b445df6a66 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) { struct mlx4_dev *dev = to_mdev(device)->dev; - return dev->caps.port_mask & (1 << (port_num - 1)) ? + return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; } @@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, memset(mailbox->buf, 0, 256); memcpy(mailbox->buf, props->node_desc, 64); mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, - MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); @@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, } err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev->dev, mailbox); return err; @@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work) memcpy(gids, gw->gids, sizeof gw->gids); err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port, - 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B); + 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); if (err) printk(KERN_WARNING "set port command failed\n"); else { @@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) printk_once(KERN_INFO "%s", mlx4_ib_version); + if (mlx4_is_mfunc(dev)) { + printk(KERN_WARNING "IB not yet supported in SRIOV\n"); + return NULL; + } + mlx4_foreach_ib_transport_port(i, dev) num_ports++; diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 0a52d72371e..b1e6cae5f47 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi else netdev = nesvnic->netdev; - neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); + rcu_read_lock(); + neigh = dst_get_neighbour_noref(&rt->dst); if (neigh) { if (neigh->nud_state & NUD_VALID) { nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" @@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, neigh->ha, ETH_ALEN)) { /* Mac address same as in nes_arp_table */ - neigh_release(neigh); ip_rt_put(rt); return rc; } @@ -1373,15 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi dst_ip, NES_ARP_ADD); rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL, NES_ARP_RESOLVE); + } else { + neigh_event_send(neigh, NULL); } - neigh_release(neigh); - } - - if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) { - rcu_read_lock(); - neigh_event_send(dst_get_neighbour(&rt->dst), NULL); - rcu_read_unlock(); } + rcu_read_unlock(); ip_rt_put(rt); return rc; } diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index c00d2f3f896..4b3fa711a24 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ 
b/drivers/infiniband/hw/nes/nes_nic.c @@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = { .set_pauseparam = nes_netdev_set_pauseparam, }; -static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features) +static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 u32temp; @@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } -static u32 nes_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features) return features; } -static int nes_set_features(struct net_device *netdev, u32 features) +static int nes_set_features(struct net_device *netdev, netdev_features_t features) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 83695b48b01..3514ca05dee 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev) return 0; } -static u32 ipoib_fix_features(struct net_device *dev, u32 features) +static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -556,15 +556,13 @@ static int path_rec_start(struct net_device *dev, } /* called with rcu_read_lock */ -static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) +static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_path *path; struct ipoib_neigh *neigh; - struct neighbour *n; unsigned long flags; - n = dst_get_neighbour(skb_dst(skb)); neigh = ipoib_neigh_alloc(n, skb->dev); if (!neigh) { ++dev->stats.tx_dropped; @@ -638,16 +636,13 @@ err_drop: } /* called with rcu_read_lock */ -static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) +static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(skb->dev); - struct dst_entry *dst = skb_dst(skb); - struct neighbour *n; /* Look up path record for unicasts */ - n = dst_get_neighbour(dst); if (n->ha[4] != 0xff) { - neigh_add_path(skb, dev); + neigh_add_path(skb, n, dev); return; } @@ -723,12 +718,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; rcu_read_lock(); - if (likely(skb_dst(skb))) - n = dst_get_neighbour(skb_dst(skb)); - + if (likely(skb_dst(skb))) { + n = dst_get_neighbour_noref(skb_dst(skb)); + if (!n) { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + goto unlock; + } + } if (likely(n)) { if (unlikely(!*to_ipoib_neigh(n))) { - ipoib_path_lookup(skb, dev); + ipoib_path_lookup(skb, n, dev); goto unlock; } @@ -751,7 +751,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) list_del(&neigh->list); ipoib_neigh_free(dev, neigh); spin_unlock_irqrestore(&priv->lock, flags); - ipoib_path_lookup(skb, dev); + ipoib_path_lookup(skb, n, dev); goto unlock; } @@ -841,7 +841,7 @@ 
static int ipoib_hard_header(struct sk_buff *skb, dst = skb_dst(skb); n = NULL; if (dst) - n = dst_get_neighbour_raw(dst); + n = dst_get_neighbour_noref_raw(dst); if ((!dst || !n) && daddr) { struct ipoib_pseudoheader *phdr = (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); @@ -1222,6 +1222,8 @@ static struct net_device *ipoib_add_port(const char *format, priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; + priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh); + result = ib_query_pkey(hca, port, 0, &priv->pkey); if (result) { printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 873bff97e69..f7ff9dd66cd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -269,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, skb->dev = dev; if (dst) - n = dst_get_neighbour_raw(dst); + n = dst_get_neighbour_noref_raw(dst); if (!dst || !n) { /* put pseudoheader back on for next time */ skb_push(skb, sizeof (struct ipoib_pseudoheader)); @@ -728,7 +728,7 @@ out: rcu_read_lock(); if (dst) - n = dst_get_neighbour(dst); + n = dst_get_neighbour_noref(dst); if (n && !*to_ipoib_neigh(n)) { struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, skb->dev); diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index 04231cb2f03..1793ba1b6a8 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c @@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) { isdn_if *iif; - pr_info("ISDN4Linux interface\n"); - iif = kmalloc(sizeof *iif, GFP_KERNEL); if (!iif) { pr_err("out of memory\n"); @@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs) */ void gigaset_isdn_regdrv(void) { + pr_info("ISDN4Linux interface\n"); /* nothing to do */ } diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 0dc30ffde5a..595d7319701 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -381,6 +381,11 @@ error: return PTR_ERR(vqs[i]); } +static const char *lg_bus_name(struct virtio_device *vdev) +{ + return ""; +} + /* The ops structure which hooks everything together. */ static struct virtio_config_ops lguest_config_ops = { .get_features = lg_get_features, @@ -392,6 +397,7 @@ static struct virtio_config_ops lguest_config_ops = { .reset = lg_reset, .find_vqs = lg_find_vqs, .del_vqs = lg_del_vqs, + .bus_name = lg_bus_name, }; /* diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c index 7b33de95c4b..0ff4b02177b 100644 --- a/drivers/misc/eeprom/eeprom_93cx6.c +++ b/drivers/misc/eeprom/eeprom_93cx6.c @@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) eeprom->reg_data_out = 0; eeprom->reg_data_clock = 0; eeprom->reg_chip_select = 1; + eeprom->drive_data = 1; eeprom->register_write(eeprom); /* @@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; + eeprom->drive_data = 1; /* * Start writing all bits. @@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; + eeprom->drive_data = 0; /* * Start reading all bits. 
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, } EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); +/** + * eeprom_93cx6_wren - set the write enable state + * @eeprom: Pointer to eeprom structure + * @enable: true to enable writes, otherwise disable writes + * + * Set the EEPROM write enable state to either allow or deny + * writes depending on the @enable value. + */ +void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable) +{ + u16 command; + + /* start the command */ + eeprom_93cx6_startup(eeprom); + + /* create command to enable/disable */ + + command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE; + command <<= (eeprom->width - 2); + + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width); + + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_wren); + +/** + * eeprom_93cx6_write - write data to the EEPROM + * @eeprom: Pointer to eeprom structure + * @addr: Address to write data to. + * @data: The data to write to address @addr. + * + * Write the @data to the specified @addr in the EEPROM and + * waiting for the device to finish writing. + * + * Note, since we do not expect large number of write operations + * we delay in between parts of the operation to avoid using excessive + * amounts of CPU time busy waiting. + */ +void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data) +{ + int timeout = 100; + u16 command; + + /* start the command */ + eeprom_93cx6_startup(eeprom); + + command = PCI_EEPROM_WRITE_OPCODE << eeprom->width; + command |= addr; + + /* send write command */ + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width); + + /* send data */ + eeprom_93cx6_write_bits(eeprom, data, 16); + + /* get ready to check for busy */ + eeprom->drive_data = 0; + eeprom->reg_chip_select = 1; + eeprom->register_write(eeprom); + + /* wait at-least 250ns to get DO to be the busy signal */ + usleep_range(1000, 2000); + + /* wait for DO to go high to signify finish */ + + while (true) { + eeprom->register_read(eeprom); + + if (eeprom->reg_data_out) + break; + + usleep_range(1000, 2000); + + if (--timeout <= 0) { + printk(KERN_ERR "%s: timeout\n", __func__); + break; + } + } + + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_write); diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 42f067347bc..3fac67a5204 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -576,7 +576,7 @@ xpnet_init(void) * report an error if the data is not retrievable and the * packet will be dropped. */ - xpnet_device->features = NETIF_F_NO_CSUM; + xpnet_device->features = NETIF_F_HW_CSUM; result = register_netdev(xpnet_device); if (result != 0) { diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 654a5e94e0e..9845afb37cc 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -125,6 +125,8 @@ config IFB 'ifb1' etc. 
Look at the iproute2 documentation directory for usage etc +source "drivers/net/team/Kconfig" + config MACVLAN tristate "MAC-VLAN support (EXPERIMENTAL)" depends on EXPERIMENTAL @@ -241,6 +243,8 @@ source "drivers/atm/Kconfig" source "drivers/net/caif/Kconfig" +source "drivers/net/dsa/Kconfig" + source "drivers/net/ethernet/Kconfig" source "drivers/net/fddi/Kconfig" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index fa877cd2b13..1988881853a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-$(CONFIG_PHYLIB) += phy/ obj-$(CONFIG_RIONET) += rionet.o +obj-$(CONFIG_NET_TEAM) += team/ obj-$(CONFIG_TUN) += tun.o obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o @@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_ETRAX_ETHERNET) += cris/ +obj-$(CONFIG_NET_DSA) += dsa/ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c deleted file mode 100644 index 027a0ee7d85..00000000000 --- a/drivers/net/bonding/bond_ipv6.c +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright(c) 2008 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/types.h> -#include <linux/if_vlan.h> -#include <net/ipv6.h> -#include <net/ndisc.h> -#include <net/addrconf.h> -#include <net/netns/generic.h> -#include "bonding.h" - -/* - * Assign bond->master_ipv6 to the next IPv6 address in the list, or - * zero it out if there are none. 
- */ -static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr) -{ - struct inet6_dev *idev; - - if (!dev) - return; - - idev = in6_dev_get(dev); - if (!idev) - return; - - read_lock_bh(&idev->lock); - if (!list_empty(&idev->addr_list)) { - struct inet6_ifaddr *ifa - = list_first_entry(&idev->addr_list, - struct inet6_ifaddr, if_list); - ipv6_addr_copy(addr, &ifa->addr); - } else - ipv6_addr_set(addr, 0, 0, 0, 0); - - read_unlock_bh(&idev->lock); - - in6_dev_put(idev); -} - -static void bond_na_send(struct net_device *slave_dev, - struct in6_addr *daddr, - int router, - unsigned short vlan_id) -{ - struct in6_addr mcaddr; - struct icmp6hdr icmp6h = { - .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT, - }; - struct sk_buff *skb; - - icmp6h.icmp6_router = router; - icmp6h.icmp6_solicited = 0; - icmp6h.icmp6_override = 1; - - addrconf_addr_solict_mult(daddr, &mcaddr); - - pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n", - slave_dev->name, &mcaddr, daddr); - - skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr, - ND_OPT_TARGET_LL_ADDR); - - if (!skb) { - pr_err("NA packet allocation failed\n"); - return; - } - - if (vlan_id) { - /* The Ethernet header is not present yet, so it is - * too early to insert a VLAN tag. Force use of an - * out-of-line tag here and let dev_hard_start_xmit() - * insert it if the slave hardware can't. - */ - skb = __vlan_hwaccel_put_tag(skb, vlan_id); - if (!skb) { - pr_err("failed to insert VLAN tag\n"); - return; - } - } - - ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h); -} - -/* - * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on - * the bonding master. This will help the switch learn our address - * if in active-backup mode. - * - * Caller must hold curr_slave_lock for read or better - */ -void bond_send_unsolicited_na(struct bonding *bond) -{ - struct slave *slave = bond->curr_active_slave; - struct vlan_entry *vlan; - struct inet6_dev *idev; - int is_router; - - pr_debug("%s: bond %s slave %s\n", bond->dev->name, - __func__, slave ? slave->dev->name : "NULL"); - - if (!slave || !bond->send_unsol_na || - test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) - return; - - bond->send_unsol_na--; - - idev = in6_dev_get(bond->dev); - if (!idev) - return; - - is_router = !!idev->cnf.forwarding; - - in6_dev_put(idev); - - if (!ipv6_addr_any(&bond->master_ipv6)) - bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0); - - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - if (!ipv6_addr_any(&vlan->vlan_ipv6)) { - bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router, - vlan->vlan_id); - } - } -} - -/* - * bond_inet6addr_event: handle inet6addr notifier chain events. - * - * We keep track of device IPv6 addresses primarily to use as source - * addresses in NS probes. - * - * We track one IPv6 for the main device (if it has one). 
- */ -static int bond_inet6addr_event(struct notifier_block *this, - unsigned long event, - void *ptr) -{ - struct inet6_ifaddr *ifa = ptr; - struct net_device *vlan_dev, *event_dev = ifa->idev->dev; - struct bonding *bond; - struct vlan_entry *vlan; - struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id); - - list_for_each_entry(bond, &bn->dev_list, bond_list) { - if (bond->dev == event_dev) { - switch (event) { - case NETDEV_UP: - if (ipv6_addr_any(&bond->master_ipv6)) - ipv6_addr_copy(&bond->master_ipv6, - &ifa->addr); - return NOTIFY_OK; - case NETDEV_DOWN: - if (ipv6_addr_equal(&bond->master_ipv6, - &ifa->addr)) - bond_glean_dev_ipv6(bond->dev, - &bond->master_ipv6); - return NOTIFY_OK; - default: - return NOTIFY_DONE; - } - } - - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - rcu_read_lock(); - vlan_dev = __vlan_find_dev_deep(bond->dev, - vlan->vlan_id); - rcu_read_unlock(); - if (vlan_dev == event_dev) { - switch (event) { - case NETDEV_UP: - if (ipv6_addr_any(&vlan->vlan_ipv6)) - ipv6_addr_copy(&vlan->vlan_ipv6, - &ifa->addr); - return NOTIFY_OK; - case NETDEV_DOWN: - if (ipv6_addr_equal(&vlan->vlan_ipv6, - &ifa->addr)) - bond_glean_dev_ipv6(vlan_dev, - &vlan->vlan_ipv6); - return NOTIFY_OK; - default: - return NOTIFY_DONE; - } - } - } - } - return NOTIFY_DONE; -} - -static struct notifier_block bond_inet6addr_notifier = { - .notifier_call = bond_inet6addr_event, -}; - -void bond_register_ipv6_notifier(void) -{ - register_inet6addr_notifier(&bond_inet6addr_notifier); -} - -void bond_unregister_ipv6_notifier(void) -{ - unregister_inet6addr_notifier(&bond_inet6addr_notifier); -} - diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7f8756825b8..435984ad8b2 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, * @bond_dev: bonding net device that got called * @vid: vlan id being added */ -static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) +static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave; + struct slave *slave, *stop_at; int i, res; bond_for_each_slave(bond, slave, i) { - struct net_device *slave_dev = slave->dev; - const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - - if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && - slave_ops->ndo_vlan_rx_add_vid) { - slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid); - } + res = vlan_vid_add(slave->dev, vid); + if (res) + goto unwind; } res = bond_add_vlan(bond, vid); if (res) { pr_err("%s: Error: Failed to add vlan id %d\n", bond_dev->name, vid); + return res; } + + return 0; + +unwind: + /* unwind from head to the slave that failed */ + stop_at = slave; + bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) + vlan_vid_del(slave->dev, vid); + + return res; } /** @@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) * @bond_dev: bonding net device that got called * @vid: vlan id being removed */ -static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) +static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i, res; - bond_for_each_slave(bond, slave, i) { - struct net_device *slave_dev = slave->dev; - const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - - 
if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && - slave_ops->ndo_vlan_rx_kill_vid) { - slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid); - } - } + bond_for_each_slave(bond, slave, i) + vlan_vid_del(slave->dev, vid); res = bond_del_vlan(bond, vid); if (res) { pr_err("%s: Error: Failed to remove vlan id %d\n", bond_dev->name, vid); + return res; } + + return 0; } static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev) { struct vlan_entry *vlan; - const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - - if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || - !(slave_ops->ndo_vlan_rx_add_vid)) - return; + int res; - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) - slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); + list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { + res = vlan_vid_add(slave_dev, vlan->vlan_id); + if (res) + pr_warning("%s: Failed to add vlan id %d to device %s\n", + bond->dev->name, vlan->vlan_id, + slave_dev->name); + } } static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev) { - const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct vlan_entry *vlan; - if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || - !(slave_ops->ndo_vlan_rx_kill_vid)) - return; - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { if (!vlan->vlan_id) continue; - slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id); + vlan_vid_del(slave_dev, vlan->vlan_id); } } @@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev, return 0; } -static u32 bond_fix_features(struct net_device *dev, u32 features) +static netdev_features_t bond_fix_features(struct net_device *dev, + netdev_features_t features) { struct slave *slave; struct bonding *bond = netdev_priv(dev); - u32 mask; + netdev_features_t mask; int i; read_lock(&bond->lock); @@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond) { struct slave *slave; struct net_device *bond_dev = bond->dev; - u32 vlan_features = BOND_VLAN_FEATURES; + netdev_features_t vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; int i; @@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) "but new slave device does not support netpoll.\n", bond_dev->name); res = -EBUSY; - goto err_close; + goto err_detach; } } #endif @@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) - goto err_close; + goto err_detach; res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); @@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); +err_detach: + write_lock_bh(&bond->lock); + bond_detach_slave(bond, new_slave); + write_unlock_bh(&bond->lock); + err_close: dev_close(slave_dev); @@ -1897,7 +1902,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; struct sockaddr addr; - u32 old_features = bond_dev->features; + netdev_features_t old_features = bond_dev->features; /* slave is not a slave or master is not master of this slave */ if (!(slave_dev->flags & IFF_SLAVE) || @@ -4339,7 +4344,7 @@ static void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - bond_dev->hw_features &= 
~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM); + bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); bond_dev->features |= bond_dev->hw_features; } diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 073352517ad..0a4fc62a381 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); - - ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev); - if (ret) { - dev_warn(&cfhsi->ndev->dev, - "%s: can't wake up HSI interface: %d.\n", - __func__, ret); - return ret; - } - do { ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, &fifo_occupancy); @@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) } } while (1); - cfhsi->dev->cfhsi_wake_down(cfhsi->dev); - return ret; } @@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) /* Create HSI frame. */ len = cfhsi_tx_frm(desc, cfhsi); - BUG_ON(!len); + WARN_ON(!len); /* Set up new transfer. */ res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 23406e62c0b..8a3054b8481 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF); /*This list is protected by the rtnl lock. */ static LIST_HEAD(ser_list); -static int ser_loop; +static bool ser_loop; module_param(ser_loop, bool, S_IRUGO); MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode."); -static int ser_use_stx = 1; +static bool ser_use_stx = true; module_param(ser_use_stx, bool, S_IRUGO); MODULE_PARM_DESC(ser_use_stx, "STX enabled or not."); -static int ser_use_fcs = 1; +static bool ser_use_fcs = true; module_param(ser_use_fcs, bool, S_IRUGO); MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not."); @@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser) skb_pull(skb, tty_wr); if (skb->len == 0) { struct sk_buff *tmp = skb_dequeue(&ser->head); - BUG_ON(tmp != skb); + WARN_ON(tmp != skb); if (in_interrupt()) dev_kfree_skb_irq(skb); else @@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty) ser = tty->disc_data; BUG_ON(ser == NULL); - BUG_ON(ser->tty != tty); + WARN_ON(ser->tty != tty); handle_tx(ser); } diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c index d4b26fb24ed..5b2041319a3 100644 --- a/drivers/net/caif/caif_shmcore.c +++ b/drivers/net/caif/caif_shmcore.c @@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv) if ((avail_emptybuff > HIGH_WATERMARK) && (!pshm_drv->tx_empty_available)) { pshm_drv->tx_empty_available = 1; + spin_unlock_irqrestore(&pshm_drv->lock, flags); pshm_drv->cfdev.flowctrl (pshm_drv->pshm_dev->pshm_netdev, CAIF_FLOW_ON); - spin_unlock_irqrestore(&pshm_drv->lock, flags); /* Schedule the work queue. if required */ if (!work_pending(&pshm_drv->shm_tx_work)) @@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work) list_entry(pshm_drv->rx_full_list.next, struct buf_list, list); list_del_init(&pbuf->list); + spin_unlock_irqrestore(&pshm_drv->lock, flags); /* Retrieve pointer to start of the packet descriptor area. */ pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr; @@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work) /* Get a suitable CAIF packet and copy in data. 
*/ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev, frm_pck_len + 1); - BUG_ON(skb == NULL); + + if (skb == NULL) { + pr_info("OOM: Try next frame in descriptor\n"); + break; + } p = skb_put(skb, frm_pck_len); memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len); @@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work) pck_desc++; } + spin_lock_irqsave(&pshm_drv->lock, flags); list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list); spin_unlock_irqrestore(&pshm_drv->lock, flags); @@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work) if (skb == NULL) goto send_msg; - /* Check the available no. of buffers in the empty list */ list_for_each(pos, &pshm_drv->tx_empty_list) avail_emptybuff++; @@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work) pshm_drv->tx_empty_available) { /* Update blocking condition. */ pshm_drv->tx_empty_available = 0; + spin_unlock_irqrestore(&pshm_drv->lock, flags); pshm_drv->cfdev.flowctrl (pshm_drv->pshm_dev->pshm_netdev, CAIF_FLOW_OFF); + spin_lock_irqsave(&pshm_drv->lock, flags); } /* * We simply return back to the caller if we do not have space @@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work) } skb = skb_dequeue(&pshm_drv->sk_qhead); + if (skb == NULL) + break; /* Copy in CAIF frame. */ skb_copy_bits(skb, 0, pbuf->desc_vptr + pbuf->frm_ofs + SHM_HDR_LEN + @@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work) pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++; pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes += frmlen; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); /* Fill in the shared memory packet descriptor area. */ pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr); @@ -512,16 +521,11 @@ send_msg: static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev) { struct shmdrv_layer *pshm_drv; - unsigned long flags = 0; pshm_drv = netdev_priv(shm_netdev); - spin_lock_irqsave(&pshm_drv->lock, flags); - skb_queue_tail(&pshm_drv->sk_qhead, skb); - spin_unlock_irqrestore(&pshm_drv->lock, flags); - /* Schedule Tx work queue. for deferred processing of skbs*/ if (!work_pending(&pshm_drv->shm_tx_work)) queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work); @@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr + (NR_TX_BUF * TX_BUF_SZ); + spin_lock_init(&pshm_drv->lock); INIT_LIST_HEAD(&pshm_drv->tx_empty_list); INIT_LIST_HEAD(&pshm_drv->tx_pend_list); INIT_LIST_HEAD(&pshm_drv->tx_full_list); @@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) tx_buf->frm_ofs = SHM_CAIF_FRM_OFS; if (pshm_dev->shm_loopback) - tx_buf->desc_vptr = (char *)tx_buf->phy_addr; + tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; else tx_buf->desc_vptr = ioremap(tx_buf->phy_addr, TX_BUF_SZ); @@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) rx_buf->len = RX_BUF_SZ; if (pshm_dev->shm_loopback) - rx_buf->desc_vptr = (char *)rx_buf->phy_addr; + rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr; else rx_buf->desc_vptr = ioremap(rx_buf->phy_addr, RX_BUF_SZ); diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 05e791f46ae..96391c36fa7 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver"); /* Returns the number of padding bytes for alignment. */ #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 
0 : (((pow)-((x)&((pow)-1))))) -static int spi_loop; +static bool spi_loop; module_param(spi_loop, bool, S_IRUGO); MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); @@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, "Tx data (Len: %d):\n", cfspi->tx_cpck_len); len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), - cfspi->xfer.va_tx, + cfspi->xfer.va_tx[0], (cfspi->tx_cpck_len + SPI_CMD_SZ), 100); len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), @@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev) netif_stop_queue(dev); return 0; } -static const struct net_device_ops cfspi_ops = { - .ndo_open = cfspi_open, - .ndo_stop = cfspi_close, - .ndo_start_xmit = cfspi_xmit -}; -static void cfspi_setup(struct net_device *dev) +static int cfspi_init(struct net_device *dev) { + int res = 0; struct cfspi *cfspi = netdev_priv(dev); - dev->features = 0; - dev->netdev_ops = &cfspi_ops; - dev->type = ARPHRD_CAIF; - dev->flags = IFF_NOARP | IFF_POINTOPOINT; - dev->tx_queue_len = 0; - dev->mtu = SPI_MAX_PAYLOAD_SIZE; - dev->destructor = free_netdev; - skb_queue_head_init(&cfspi->qhead); - skb_queue_head_init(&cfspi->chead); - cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; - cfspi->cfdev.use_frag = false; - cfspi->cfdev.use_stx = false; - cfspi->cfdev.use_fcs = false; - cfspi->ndev = dev; -} - -int cfspi_spi_probe(struct platform_device *pdev) -{ - struct cfspi *cfspi = NULL; - struct net_device *ndev; - struct cfspi_dev *dev; - int res; - dev = (struct cfspi_dev *)pdev->dev.platform_data; - - ndev = alloc_netdev(sizeof(struct cfspi), - "cfspi%d", cfspi_setup); - if (!ndev) - return -ENOMEM; - - cfspi = netdev_priv(ndev); - netif_stop_queue(ndev); - cfspi->ndev = ndev; - cfspi->pdev = pdev; /* Set flow info. */ cfspi->flow_off_sent = 0; @@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev) cfspi->slave_talked = false; } - /* Assign the SPI device. */ - cfspi->dev = dev; - /* Assign the device ifc to this SPI interface. */ - dev->ifc = &cfspi->ifc; - /* Allocate DMA buffers. */ - cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx); - if (!cfspi->xfer.va_tx) { + cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]); + if (!cfspi->xfer.va_tx[0]) { res = -ENODEV; - goto err_dma_alloc_tx; + goto err_dma_alloc_tx_0; } cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx); @@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev) /* Schedule the work queue. */ queue_work(cfspi->wq, &cfspi->work); + return 0; + + err_create_wq: + dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + err_dma_alloc_rx: + dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); + err_dma_alloc_tx_0: + return res; +} + +static void cfspi_uninit(struct net_device *dev) +{ + struct cfspi *cfspi = netdev_priv(dev); + + /* Remove from list. */ + spin_lock(&cfspi_list_lock); + list_del(&cfspi->list); + spin_unlock(&cfspi_list_lock); + + cfspi->ndev = NULL; + /* Free DMA buffers. */ + dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); + set_bit(SPI_TERMINATE, &cfspi->state); + wake_up_interruptible(&cfspi->wait); + destroy_workqueue(cfspi->wq); + /* Destroy debugfs directory and files. 
*/ + dev_debugfs_rem(cfspi); + return; +} + +static const struct net_device_ops cfspi_ops = { + .ndo_open = cfspi_open, + .ndo_stop = cfspi_close, + .ndo_init = cfspi_init, + .ndo_uninit = cfspi_uninit, + .ndo_start_xmit = cfspi_xmit +}; + +static void cfspi_setup(struct net_device *dev) +{ + struct cfspi *cfspi = netdev_priv(dev); + dev->features = 0; + dev->netdev_ops = &cfspi_ops; + dev->type = ARPHRD_CAIF; + dev->flags = IFF_NOARP | IFF_POINTOPOINT; + dev->tx_queue_len = 0; + dev->mtu = SPI_MAX_PAYLOAD_SIZE; + dev->destructor = free_netdev; + skb_queue_head_init(&cfspi->qhead); + skb_queue_head_init(&cfspi->chead); + cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; + cfspi->cfdev.use_frag = false; + cfspi->cfdev.use_stx = false; + cfspi->cfdev.use_fcs = false; + cfspi->ndev = dev; +} + +int cfspi_spi_probe(struct platform_device *pdev) +{ + struct cfspi *cfspi = NULL; + struct net_device *ndev; + struct cfspi_dev *dev; + int res; + dev = (struct cfspi_dev *)pdev->dev.platform_data; + + ndev = alloc_netdev(sizeof(struct cfspi), + "cfspi%d", cfspi_setup); + if (!dev) + return -ENODEV; + + cfspi = netdev_priv(ndev); + netif_stop_queue(ndev); + cfspi->ndev = ndev; + cfspi->pdev = pdev; + + /* Assign the SPI device. */ + cfspi->dev = dev; + /* Assign the device ifc to this SPI interface. */ + dev->ifc = &cfspi->ifc; + /* Register network device. */ res = register_netdev(ndev); if (res) { @@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev) return res; err_net_reg: - dev_debugfs_rem(cfspi); - set_bit(SPI_TERMINATE, &cfspi->state); - wake_up_interruptible(&cfspi->wait); - destroy_workqueue(cfspi->wq); - err_create_wq: - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); - err_dma_alloc_rx: - dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx); - err_dma_alloc_tx: free_netdev(ndev); return res; @@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev) int cfspi_spi_remove(struct platform_device *pdev) { - struct list_head *list_node; - struct list_head *n; - struct cfspi *cfspi = NULL; - struct cfspi_dev *dev; - - dev = (struct cfspi_dev *)pdev->dev.platform_data; - spin_lock(&cfspi_list_lock); - list_for_each_safe(list_node, n, &cfspi_list) { - cfspi = list_entry(list_node, struct cfspi, list); - /* Find the corresponding device. */ - if (cfspi->dev == dev) { - /* Remove from list. */ - list_del(list_node); - /* Free DMA buffers. */ - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); - dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx); - set_bit(SPI_TERMINATE, &cfspi->state); - wake_up_interruptible(&cfspi->wait); - destroy_workqueue(cfspi->wq); - /* Destroy debugfs directory and files. */ - dev_debugfs_rem(cfspi); - unregister_netdev(cfspi->ndev); - spin_unlock(&cfspi_list_lock); - return 0; - } - } - spin_unlock(&cfspi_list_lock); - return -ENODEV; + /* Everything is done in cfspi_uninit(). */ + return 0; } static void __exit cfspi_exit_module(void) @@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void) list_for_each_safe(list_node, n, &cfspi_list) { cfspi = list_entry(list_node, struct cfspi, list); - platform_device_unregister(cfspi->pdev); + unregister_netdev(cfspi->ndev); } /* Destroy sysfs files. 
*/ diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index f6c98fb4a51..ab45758c49a 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/c_can/Kconfig" +source "drivers/net/can/cc770/Kconfig" + source "drivers/net/can/usb/Kconfig" source "drivers/net/can/softing/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 24ebfe8d758..938be37b670 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -14,6 +14,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_C_CAN) += c_can/ +obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 044ea0647b0..6ea905c2cf6 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = { .id_table = at91_can_id_table, }; -static int __init at91_can_module_init(void) -{ - return platform_driver_register(&at91_can_driver); -} - -static void __exit at91_can_module_exit(void) -{ - platform_driver_unregister(&at91_can_driver); -} - -module_init(at91_can_module_init); -module_exit(at91_can_module_exit); +module_platform_driver(at91_can_driver); MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index a1c5abc38cd..349e0fabb63 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = { }, }; -static int __init bfin_can_init(void) -{ - return platform_driver_register(&bfin_can_driver); -} -module_init(bfin_can_init); - -static void __exit bfin_can_exit(void) -{ - platform_driver_unregister(&bfin_can_driver); -} -module_exit(bfin_can_exit); +module_platform_driver(bfin_can_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 0b5c6f8bdd3..5e1a5ff6476 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = { .remove = __devexit_p(c_can_plat_remove), }; -static int __init c_can_plat_init(void) -{ - return platform_driver_register(&c_can_plat_driver); -} -module_init(c_can_plat_init); - -static void __exit c_can_plat_exit(void) -{ - platform_driver_unregister(&c_can_plat_driver); -} -module_exit(c_can_plat_exit); +module_platform_driver(c_can_plat_driver); MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig new file mode 100644 index 00000000000..22c07a8c8b4 --- /dev/null +++ b/drivers/net/can/cc770/Kconfig @@ -0,0 +1,21 @@ +menuconfig CAN_CC770 + tristate "Bosch CC770 and Intel AN82527 devices" + depends on CAN_DEV && HAS_IOMEM + +if CAN_CC770 + +config CAN_CC770_ISA + tristate "ISA Bus based legacy CC770 driver" + ---help--- + This driver adds legacy support for CC770 and AN82527 chips + connected to the ISA bus using I/O port, memory mapped or + indirect access. 
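A few hunks above, at91_can.c, bfin_can.c and c_can_platform.c drop their open-coded module init/exit functions in favour of the module_platform_driver() helper. As a rough sketch (not taken from this patch; foo_driver is a placeholder for the platform_driver being registered), the helper generates essentially the same boilerplate that those hunks delete:

        /* Sketch of what module_platform_driver(foo_driver) roughly expands to;
         * foo_driver stands in for the real platform_driver structure. */
        static int __init foo_driver_init(void)
        {
                return platform_driver_register(&foo_driver);
        }
        module_init(foo_driver_init);

        static void __exit foo_driver_exit(void)
        {
                platform_driver_unregister(&foo_driver);
        }
        module_exit(foo_driver_exit);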
+ +config CAN_CC770_PLATFORM + tristate "Generic Platform Bus based CC770 driver" + ---help--- + This driver adds support for the CC770 and AN82527 chips + connected to the "platform bus" (Linux abstraction for directly + to the processor attached devices). + +endif diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile new file mode 100644 index 00000000000..9fb8321b33e --- /dev/null +++ b/drivers/net/can/cc770/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Bosch CC770 CAN controller drivers. +# + +obj-$(CONFIG_CAN_CC770) += cc770.o +obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o +obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o + +ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c new file mode 100644 index 00000000000..76689674764 --- /dev/null +++ b/drivers/net/can/cc770/cc770.c @@ -0,0 +1,881 @@ +/* + * Core driver for the CC770 and AN82527 CAN controllers + * + * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/ptrace.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/delay.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/dev.h> +#include <linux/can/platform/cc770.h> + +#include "cc770.h" + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver"); + +/* + * The CC770 is a CAN controller from Bosch, which is 100% compatible + * with the AN82527 from Intel, but with "bugs" being fixed and some + * additional functionality, mainly: + * + * 1. RX and TX error counters are readable. + * 2. Support of silent (listen-only) mode. + * 3. Message object 15 can receive all types of frames, also RTR and EFF. + * + * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf", + * which explains in detail the compatibility between the CC770 and the + * 82527. This driver use the additional functionality 3. on real CC770 + * devices. Unfortunately, the CC770 does still not store the message + * identifier of received remote transmission request frames and + * therefore it's set to 0. + * + * The message objects 1..14 can be used for TX and RX while the message + * objects 15 is optimized for RX. It has a shadow register for reliable + * data receiption under heavy bus load. Therefore it makes sense to use + * this message object for the needed use case. The frame type (EFF/SFF) + * for the message object 15 can be defined via kernel module parameter + * "msgobj15_eff". If not equal 0, it will receive 29-bit EFF frames, + * otherwise 11 bit SFF messages. 
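The comment above explains why message object 15 is dedicated to reception. Concretely, the five logical objects this driver defines are mapped onto the chip's last message objects 11..15. A small standalone illustration (not part of the patch), reusing the enum and the obj2msgobj() macro that cc770.h adds further down:

        #include <stdio.h>

        /* Illustration of the object numbering used by this driver: logical
         * objects map to hardware message objects 15..11 via obj2msgobj(). */
        #define MSGOBJ_LAST     15
        #define obj2msgobj(o)   (MSGOBJ_LAST - (o))

        enum {
                CC770_OBJ_RX0 = 0,      /* normal RX, shadow-buffered object 15 */
                CC770_OBJ_RX1,
                CC770_OBJ_RX_RTR0,
                CC770_OBJ_RX_RTR1,
                CC770_OBJ_TX,
                CC770_OBJ_MAX
        };

        int main(void)
        {
                for (int o = 0; o < CC770_OBJ_MAX; o++)
                        printf("logical object %d -> message object %d\n",
                               o, obj2msgobj(o));
                return 0;       /* prints 15, 14, 13, 12, 11 */
        }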
+ */ +static int msgobj15_eff; +module_param(msgobj15_eff, int, S_IRUGO); +MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 " + "(default: 11-bit standard frames)"); + +static int i82527_compat; +module_param(i82527_compat, int, S_IRUGO); +MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode " + "without using additional functions"); + +/* + * This driver uses the last 5 message objects 11..15. The definitions + * and structure below allows to configure and assign them to the real + * message object. + */ +static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = { + [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX, + [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF, + [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR, + [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR | + CC770_OBJ_FLAG_EFF, + [CC770_OBJ_TX] = 0, +}; + +static struct can_bittiming_const cc770_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +static inline int intid2obj(unsigned int intid) +{ + if (intid == 2) + return 0; + else + return MSGOBJ_LAST + 2 - intid; +} + +static void enable_all_objs(const struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + u8 msgcfg; + unsigned char obj_flags; + unsigned int o, mo; + + for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) { + obj_flags = priv->obj_flags[o]; + mo = obj2msgobj(o); + + if (obj_flags & CC770_OBJ_FLAG_RX) { + /* + * We don't need extra objects for RTR and EFF if + * the additional CC770 functions are enabled. + */ + if (priv->control_normal_mode & CTRL_EAF) { + if (o > 0) + continue; + netdev_dbg(dev, "Message object %d for " + "RX data, RTR, SFF and EFF\n", mo); + } else { + netdev_dbg(dev, + "Message object %d for RX %s %s\n", + mo, obj_flags & CC770_OBJ_FLAG_RTR ? + "RTR" : "data", + obj_flags & CC770_OBJ_FLAG_EFF ? 
+ "EFF" : "SFF"); + } + + if (obj_flags & CC770_OBJ_FLAG_EFF) + msgcfg = MSGCFG_XTD; + else + msgcfg = 0; + if (obj_flags & CC770_OBJ_FLAG_RTR) + msgcfg |= MSGCFG_DIR; + + cc770_write_reg(priv, msgobj[mo].config, msgcfg); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_RES | + RXIE_SET | INTPND_RES); + + if (obj_flags & CC770_OBJ_FLAG_RTR) + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES | CPUUPD_SET | + TXRQST_RES | RMTPND_RES); + else + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES | MSGLST_RES | + TXRQST_RES | RMTPND_RES); + } else { + netdev_dbg(dev, "Message object %d for " + "TX data, RTR, SFF and EFF\n", mo); + + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | + CPUUPD_RES | NEWDAT_RES); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | + RXIE_RES | INTPND_RES); + } + } +} + +static void disable_all_objs(const struct cc770_priv *priv) +{ + int o, mo; + + for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) { + mo = obj2msgobj(o); + + if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) { + if (o > 0 && priv->control_normal_mode & CTRL_EAF) + continue; + + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES | MSGLST_RES | + TXRQST_RES | RMTPND_RES); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | + RXIE_RES | INTPND_RES); + } else { + /* Clear message object for send */ + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | + CPUUPD_RES | NEWDAT_RES); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | + RXIE_RES | INTPND_RES); + } + } +} + +static void set_reset_mode(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + + /* Enable configuration and puts chip in bus-off, disable interrupts */ + cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI); + + priv->can.state = CAN_STATE_STOPPED; + + /* Clear interrupts */ + cc770_read_reg(priv, interrupt); + + /* Clear status register */ + cc770_write_reg(priv, status, 0); + + /* Disable all used message objects */ + disable_all_objs(priv); +} + +static void set_normal_mode(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + + /* Clear interrupts */ + cc770_read_reg(priv, interrupt); + + /* Clear status register and pre-set last error code */ + cc770_write_reg(priv, status, STAT_LEC_MASK); + + /* Enable all used message objects*/ + enable_all_objs(dev); + + /* + * Clear bus-off, interrupts only for errors, + * not for status change + */ + cc770_write_reg(priv, control, priv->control_normal_mode); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; +} + +static void chipset_init(struct cc770_priv *priv) +{ + int mo, id, data; + + /* Enable configuration and put chip in bus-off, disable interrupts */ + cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI)); + + /* Set CLKOUT divider and slew rates */ + cc770_write_reg(priv, clkout, priv->clkout); + + /* Configure CPU interface / CLKOUT enable */ + cc770_write_reg(priv, cpu_interface, priv->cpu_interface); + + /* Set bus configuration */ + cc770_write_reg(priv, bus_config, priv->bus_config); + + /* Clear interrupts */ + cc770_read_reg(priv, interrupt); + + /* Clear status register */ + cc770_write_reg(priv, status, 0); + + /* Clear and invalidate message objects */ + for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) { + cc770_write_reg(priv, msgobj[mo].ctrl0, + INTPND_UNC | RXIE_RES | + TXIE_RES | MSGVAL_RES); + cc770_write_reg(priv, msgobj[mo].ctrl0, + INTPND_RES | RXIE_RES | + TXIE_RES | MSGVAL_RES); + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES 
| MSGLST_RES | + TXRQST_RES | RMTPND_RES); + for (data = 0; data < 8; data++) + cc770_write_reg(priv, msgobj[mo].data[data], 0); + for (id = 0; id < 4; id++) + cc770_write_reg(priv, msgobj[mo].id[id], 0); + cc770_write_reg(priv, msgobj[mo].config, 0); + } + + /* Set all global ID masks to "don't care" */ + cc770_write_reg(priv, global_mask_std[0], 0); + cc770_write_reg(priv, global_mask_std[1], 0); + cc770_write_reg(priv, global_mask_ext[0], 0); + cc770_write_reg(priv, global_mask_ext[1], 0); + cc770_write_reg(priv, global_mask_ext[2], 0); + cc770_write_reg(priv, global_mask_ext[3], 0); + +} + +static int cc770_probe_chip(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + + /* Enable configuration, put chip in bus-off, disable ints */ + cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI); + /* Configure cpu interface / CLKOUT disable */ + cc770_write_reg(priv, cpu_interface, priv->cpu_interface); + + /* + * Check if hardware reset is still inactive or maybe there + * is no chip in this address space + */ + if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) { + netdev_info(dev, "probing @0x%p failed (reset)\n", + priv->reg_base); + return -ENODEV; + } + + /* Write and read back test pattern (some arbitrary values) */ + cc770_write_reg(priv, msgobj[1].data[1], 0x25); + cc770_write_reg(priv, msgobj[2].data[3], 0x52); + cc770_write_reg(priv, msgobj[10].data[6], 0xc3); + if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) || + (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) || + (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) { + netdev_info(dev, "probing @0x%p failed (pattern)\n", + priv->reg_base); + return -ENODEV; + } + + /* Check if this chip is a CC770 supporting additional functions */ + if (cc770_read_reg(priv, control) & CTRL_EAF) + priv->control_normal_mode |= CTRL_EAF; + + return 0; +} + +static void cc770_start(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + + /* leave reset mode */ + if (priv->can.state != CAN_STATE_STOPPED) + set_reset_mode(dev); + + /* leave reset mode */ + set_normal_mode(dev); +} + +static int cc770_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + cc770_start(dev); + netif_wake_queue(dev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int cc770_set_bittiming(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + struct can_bittiming *bt = &priv->can.bittiming; + u8 btr0, btr1; + + btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); + btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | + (((bt->phase_seg2 - 1) & 0x7) << 4); + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + btr1 |= 0x80; + + netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); + + cc770_write_reg(priv, bit_timing_0, btr0); + cc770_write_reg(priv, bit_timing_1, btr1); + + return 0; +} + +static int cc770_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct cc770_priv *priv = netdev_priv(dev); + + bec->txerr = cc770_read_reg(priv, tx_error_counter); + bec->rxerr = cc770_read_reg(priv, rx_error_counter); + + return 0; +} + +static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf = (struct can_frame *)skb->data; + unsigned int mo = obj2msgobj(CC770_OBJ_TX); + u8 dlc, rtr; + u32 id; + int i; + + if 
(can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + if ((cc770_read_reg(priv, + msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { + netdev_err(dev, "TX register is still occupied!\n"); + return NETDEV_TX_BUSY; + } + + netif_stop_queue(dev); + + dlc = cf->can_dlc; + id = cf->can_id; + if (cf->can_id & CAN_RTR_FLAG) + rtr = 0; + else + rtr = MSGCFG_DIR; + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); + if (id & CAN_EFF_FLAG) { + id &= CAN_EFF_MASK; + cc770_write_reg(priv, msgobj[mo].config, + (dlc << 4) | rtr | MSGCFG_XTD); + cc770_write_reg(priv, msgobj[mo].id[3], id << 3); + cc770_write_reg(priv, msgobj[mo].id[2], id >> 5); + cc770_write_reg(priv, msgobj[mo].id[1], id >> 13); + cc770_write_reg(priv, msgobj[mo].id[0], id >> 21); + } else { + id &= CAN_SFF_MASK; + cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr); + cc770_write_reg(priv, msgobj[mo].id[0], id >> 3); + cc770_write_reg(priv, msgobj[mo].id[1], id << 5); + } + + for (i = 0; i < dlc; i++) + cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); + + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + + stats->tx_bytes += dlc; + + can_put_echo_skb(skb, dev, 0); + + /* + * HM: We had some cases of repeated IRQs so make sure the + * INT is acknowledged I know it's already further up, but + * doing again fixed the issue + */ + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + + return NETDEV_TX_OK; +} + +static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) +{ + struct cc770_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u8 config; + u32 id; + int i; + + skb = alloc_can_skb(dev, &cf); + if (!skb) + return; + + config = cc770_read_reg(priv, msgobj[mo].config); + + if (ctrl1 & RMTPND_SET) { + /* + * Unfortunately, the chip does not store the real message + * identifier of the received remote transmission request + * frame. Therefore we set it to 0. 
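For the identifier handling in cc770_start_xmit() above (and reversed in cc770_rx() just below), an 11-bit standard identifier is stored left-justified across the id[0]/id[1] registers. A standalone worked example (not part of the patch), assuming an identifier of 0x123:

        #include <stdio.h>
        #include <assert.h>

        /* Worked example of the standard-frame (SFF) identifier packing used by
         * cc770_start_xmit() and undone by cc770_rx(). */
        int main(void)
        {
                unsigned int id = 0x123;                /* example 11-bit identifier */
                unsigned char id0 = (id >> 3) & 0xff;   /* bits 10..3 -> 0x24 */
                unsigned char id1 = (id << 5) & 0xff;   /* bits 2..0  -> 0x60 */

                /* receive side reassembles the identifier with the same shifts */
                unsigned int rx = ((id0 << 8) | id1) >> 5;
                assert(rx == id);
                printf("id[0]=0x%02x id[1]=0x%02x -> id=0x%03x\n", id0, id1, rx);
                return 0;
        }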
+ */ + cf->can_id = CAN_RTR_FLAG; + if (config & MSGCFG_XTD) + cf->can_id |= CAN_EFF_FLAG; + cf->can_dlc = 0; + } else { + if (config & MSGCFG_XTD) { + id = cc770_read_reg(priv, msgobj[mo].id[3]); + id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8; + id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16; + id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24; + id >>= 3; + id |= CAN_EFF_FLAG; + } else { + id = cc770_read_reg(priv, msgobj[mo].id[1]); + id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8; + id >>= 5; + } + + cf->can_id = id; + cf->can_dlc = get_can_dlc((config & 0xf0) >> 4); + for (i = 0; i < cf->can_dlc; i++) + cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); + } + netif_rx(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; +} + +static int cc770_err(struct net_device *dev, u8 status) +{ + struct cc770_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u8 lec; + + netdev_dbg(dev, "status interrupt (%#x)\n", status); + + skb = alloc_can_err_skb(dev, &cf); + if (!skb) + return -ENOMEM; + + /* Use extended functions of the CC770 */ + if (priv->control_normal_mode & CTRL_EAF) { + cf->data[6] = cc770_read_reg(priv, tx_error_counter); + cf->data[7] = cc770_read_reg(priv, rx_error_counter); + } + + if (status & STAT_BOFF) { + /* Disable interrupts */ + cc770_write_reg(priv, control, CTRL_INI); + cf->can_id |= CAN_ERR_BUSOFF; + priv->can.state = CAN_STATE_BUS_OFF; + can_bus_off(dev); + } else if (status & STAT_WARN) { + cf->can_id |= CAN_ERR_CRTL; + /* Only the CC770 does show error passive */ + if (cf->data[7] > 127) { + cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE | + CAN_ERR_CRTL_TX_PASSIVE; + priv->can.state = CAN_STATE_ERROR_PASSIVE; + priv->can.can_stats.error_passive++; + } else { + cf->data[1] = CAN_ERR_CRTL_RX_WARNING | + CAN_ERR_CRTL_TX_WARNING; + priv->can.state = CAN_STATE_ERROR_WARNING; + priv->can.can_stats.error_warning++; + } + } else { + /* Back to error avtive */ + cf->can_id |= CAN_ERR_PROT; + cf->data[2] = CAN_ERR_PROT_ACTIVE; + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + + lec = status & STAT_LEC_MASK; + if (lec < 7 && lec > 0) { + if (lec == STAT_LEC_ACK) { + cf->can_id |= CAN_ERR_ACK; + } else { + cf->can_id |= CAN_ERR_PROT; + switch (lec) { + case STAT_LEC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case STAT_LEC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case STAT_LEC_BIT1: + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + case STAT_LEC_BIT0: + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + case STAT_LEC_CRC: + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + break; + } + } + } + + netif_rx(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + return 0; +} + +static int cc770_status_interrupt(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + u8 status; + + status = cc770_read_reg(priv, status); + /* Reset the status register including RXOK and TXOK */ + cc770_write_reg(priv, status, STAT_LEC_MASK); + + if (status & (STAT_WARN | STAT_BOFF) || + (status & STAT_LEC_MASK) != STAT_LEC_MASK) { + cc770_err(dev, status); + return status & STAT_BOFF; + } + + return 0; +} + +static void cc770_rx_interrupt(struct net_device *dev, unsigned int o) +{ + struct cc770_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned int mo = obj2msgobj(o); + u8 ctrl1; + int n = CC770_MAX_MSG; + + while (n--) { + ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); + + if (!(ctrl1 & NEWDAT_SET)) { + /* Check for RTR if 
additional functions are enabled */ + if (priv->control_normal_mode & CTRL_EAF) { + if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) & + INTPND_SET)) + break; + } else { + break; + } + } + + if (ctrl1 & MSGLST_SET) { + stats->rx_over_errors++; + stats->rx_errors++; + } + if (mo < MSGOBJ_LAST) + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES | MSGLST_RES | + TXRQST_UNC | RMTPND_UNC); + cc770_rx(dev, mo, ctrl1); + + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_RES | + RXIE_SET | INTPND_RES); + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES | MSGLST_RES | + TXRQST_RES | RMTPND_RES); + } +} + +static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o) +{ + struct cc770_priv *priv = netdev_priv(dev); + unsigned int mo = obj2msgobj(o); + u8 ctrl0, ctrl1; + int n = CC770_MAX_MSG; + + while (n--) { + ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0); + if (!(ctrl0 & INTPND_SET)) + break; + + ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); + cc770_rx(dev, mo, ctrl1); + + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_RES | + RXIE_SET | INTPND_RES); + cc770_write_reg(priv, msgobj[mo].ctrl1, + NEWDAT_RES | CPUUPD_SET | + TXRQST_RES | RMTPND_RES); + } +} + +static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) +{ + struct cc770_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned int mo = obj2msgobj(o); + + /* Nothing more to send, switch off interrupts */ + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); + /* + * We had some cases of repeated IRQ so make sure the + * INT is acknowledged + */ + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + + stats->tx_packets++; + can_get_echo_skb(dev, 0); + netif_wake_queue(dev); +} + +irqreturn_t cc770_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct cc770_priv *priv = netdev_priv(dev); + u8 intid; + int o, n = 0; + + /* Shared interrupts and IRQ off? */ + if (priv->can.state == CAN_STATE_STOPPED) + return IRQ_NONE; + + if (priv->pre_irq) + priv->pre_irq(priv); + + while (n < CC770_MAX_IRQ) { + /* Read the highest pending interrupt request */ + intid = cc770_read_reg(priv, interrupt); + if (!intid) + break; + n++; + + if (intid == 1) { + /* Exit in case of bus-off */ + if (cc770_status_interrupt(dev)) + break; + } else { + o = intid2obj(intid); + + if (o >= CC770_OBJ_MAX) { + netdev_err(dev, "Unexpected interrupt id %d\n", + intid); + continue; + } + + if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR) + cc770_rtr_interrupt(dev, o); + else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) + cc770_rx_interrupt(dev, o); + else + cc770_tx_interrupt(dev, o); + } + } + + if (priv->post_irq) + priv->post_irq(priv); + + if (n >= CC770_MAX_IRQ) + netdev_dbg(dev, "%d messages handled in ISR", n); + + return (n) ? 
IRQ_HANDLED : IRQ_NONE; +} + +static int cc770_open(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + int err; + + /* set chip into reset mode */ + set_reset_mode(dev); + + /* common open */ + err = open_candev(dev); + if (err) + return err; + + err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags, + dev->name, dev); + if (err) { + close_candev(dev); + return -EAGAIN; + } + + /* init and start chip */ + cc770_start(dev); + + netif_start_queue(dev); + + return 0; +} + +static int cc770_close(struct net_device *dev) +{ + netif_stop_queue(dev); + set_reset_mode(dev); + + free_irq(dev->irq, dev); + close_candev(dev); + + return 0; +} + +struct net_device *alloc_cc770dev(int sizeof_priv) +{ + struct net_device *dev; + struct cc770_priv *priv; + + dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv, + CC770_ECHO_SKB_MAX); + if (!dev) + return NULL; + + priv = netdev_priv(dev); + + priv->dev = dev; + priv->can.bittiming_const = &cc770_bittiming_const; + priv->can.do_set_bittiming = cc770_set_bittiming; + priv->can.do_set_mode = cc770_set_mode; + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + + memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); + + if (sizeof_priv) + priv->priv = (void *)priv + sizeof(struct cc770_priv); + + return dev; +} +EXPORT_SYMBOL_GPL(alloc_cc770dev); + +void free_cc770dev(struct net_device *dev) +{ + free_candev(dev); +} +EXPORT_SYMBOL_GPL(free_cc770dev); + +static const struct net_device_ops cc770_netdev_ops = { + .ndo_open = cc770_open, + .ndo_stop = cc770_close, + .ndo_start_xmit = cc770_start_xmit, +}; + +int register_cc770dev(struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + int err; + + err = cc770_probe_chip(dev); + if (err) + return err; + + dev->netdev_ops = &cc770_netdev_ops; + + dev->flags |= IFF_ECHO; /* we support local echo */ + + /* Should we use additional functions? 
*/ + if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) { + priv->can.do_get_berr_counter = cc770_get_berr_counter; + priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE; + netdev_dbg(dev, "i82527 mode with additional functions\n"); + } else { + priv->control_normal_mode = CTRL_IE | CTRL_EIE; + netdev_dbg(dev, "strict i82527 compatibility mode\n"); + } + + chipset_init(priv); + set_reset_mode(dev); + + return register_candev(dev); +} +EXPORT_SYMBOL_GPL(register_cc770dev); + +void unregister_cc770dev(struct net_device *dev) +{ + set_reset_mode(dev); + unregister_candev(dev); +} +EXPORT_SYMBOL_GPL(unregister_cc770dev); + +static __init int cc770_init(void) +{ + if (msgobj15_eff) { + cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF; + cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF; + } + + pr_info("CAN netdevice driver\n"); + + return 0; +} +module_init(cc770_init); + +static __exit void cc770_exit(void) +{ + pr_info("driver removed\n"); +} +module_exit(cc770_exit); diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h new file mode 100644 index 00000000000..a1739db98d9 --- /dev/null +++ b/drivers/net/can/cc770/cc770.h @@ -0,0 +1,203 @@ +/* + * Core driver for the CC770 and AN82527 CAN controllers + * + * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef CC770_DEV_H +#define CC770_DEV_H + +#include <linux/can/dev.h> + +struct cc770_msgobj { + u8 ctrl0; + u8 ctrl1; + u8 id[4]; + u8 config; + u8 data[8]; + u8 dontuse; /* padding */ +} __packed; + +struct cc770_regs { + union { + struct cc770_msgobj msgobj[16]; /* Message object 1..15 */ + struct { + u8 control; /* Control Register */ + u8 status; /* Status Register */ + u8 cpu_interface; /* CPU Interface Register */ + u8 dontuse1; + u8 high_speed_read[2]; /* High Speed Read */ + u8 global_mask_std[2]; /* Standard Global Mask */ + u8 global_mask_ext[4]; /* Extended Global Mask */ + u8 msg15_mask[4]; /* Message 15 Mask */ + u8 dontuse2[15]; + u8 clkout; /* Clock Out Register */ + u8 dontuse3[15]; + u8 bus_config; /* Bus Configuration Register */ + u8 dontuse4[15]; + u8 bit_timing_0; /* Bit Timing Register byte 0 */ + u8 dontuse5[15]; + u8 bit_timing_1; /* Bit Timing Register byte 1 */ + u8 dontuse6[15]; + u8 interrupt; /* Interrupt Register */ + u8 dontuse7[15]; + u8 rx_error_counter; /* Receive Error Counter */ + u8 dontuse8[15]; + u8 tx_error_counter; /* Transmit Error Counter */ + u8 dontuse9[31]; + u8 p1_conf; + u8 dontuse10[15]; + u8 p2_conf; + u8 dontuse11[15]; + u8 p1_in; + u8 dontuse12[15]; + u8 p2_in; + u8 dontuse13[15]; + u8 p1_out; + u8 dontuse14[15]; + u8 p2_out; + u8 dontuse15[15]; + u8 serial_reset_addr; + }; + }; +} __packed; + +/* Control Register (0x00) */ +#define CTRL_INI 0x01 /* Initialization */ +#define CTRL_IE 0x02 /* Interrupt Enable */ +#define CTRL_SIE 0x04 /* Status Interrupt Enable */ +#define CTRL_EIE 0x08 /* Error Interrupt Enable */ +#define CTRL_EAF 0x20 /* Enable additional functions */ +#define CTRL_CCE 0x40 /* Change Configuration Enable */ + +/* Status Register (0x01) */ +#define STAT_LEC_STUFF 0x01 /* Stuff error */ +#define STAT_LEC_FORM 0x02 /* Form error */ +#define STAT_LEC_ACK 0x03 /* Acknowledgement error */ +#define STAT_LEC_BIT1 0x04 /* Bit1 error */ +#define STAT_LEC_BIT0 0x05 /* Bit0 error */ +#define STAT_LEC_CRC 0x06 /* CRC error */ +#define STAT_LEC_MASK 0x07 /* Last Error Code mask */ +#define STAT_TXOK 0x08 /* Transmit Message Successfully */ +#define STAT_RXOK 0x10 /* Receive Message Successfully */ +#define STAT_WAKE 0x20 /* Wake Up Status */ +#define STAT_WARN 0x40 /* Warning Status */ +#define STAT_BOFF 0x80 /* Bus Off Status */ + +/* + * CPU Interface Register (0x02) + * Clock Out Register (0x1f) + * Bus Configuration Register (0x2f) + * + * see include/linux/can/platform/cc770.h + */ + +/* Message Control Register 0 (Base Address + 0x0) */ +#define INTPND_RES 0x01 /* No Interrupt pending */ +#define INTPND_SET 0x02 /* Interrupt pending */ +#define INTPND_UNC 0x03 +#define RXIE_RES 0x04 /* Receive Interrupt Disable */ +#define RXIE_SET 0x08 /* Receive Interrupt Enable */ +#define RXIE_UNC 0x0c +#define TXIE_RES 0x10 /* Transmit Interrupt Disable */ +#define TXIE_SET 0x20 /* Transmit Interrupt Enable */ +#define TXIE_UNC 0x30 +#define MSGVAL_RES 0x40 /* Message Invalid */ +#define MSGVAL_SET 0x80 /* Message Valid */ +#define MSGVAL_UNC 0xc0 + +/* Message Control Register 1 (Base Address + 0x01) */ +#define NEWDAT_RES 0x01 /* No New Data */ +#define NEWDAT_SET 0x02 /* New Data */ +#define NEWDAT_UNC 0x03 +#define MSGLST_RES 0x04 /* No Message Lost */ +#define MSGLST_SET 0x08 /* Message Lost */ +#define MSGLST_UNC 0x0c +#define CPUUPD_RES 0x04 /* No CPU Updating */ +#define CPUUPD_SET 0x08 /* CPU Updating */ +#define CPUUPD_UNC 0x0c +#define TXRQST_RES 0x10 /* No Transmission Request */ +#define TXRQST_SET 0x20 /* 
Transmission Request */ +#define TXRQST_UNC 0x30 +#define RMTPND_RES 0x40 /* No Remote Request Pending */ +#define RMTPND_SET 0x80 /* Remote Request Pending */ +#define RMTPND_UNC 0xc0 + +/* Message Configuration Register (Base Address + 0x06) */ +#define MSGCFG_XTD 0x04 /* Extended Identifier */ +#define MSGCFG_DIR 0x08 /* Direction is Transmit */ + +#define MSGOBJ_FIRST 1 +#define MSGOBJ_LAST 15 + +#define CC770_IO_SIZE 0x100 +#define CC770_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ +#define CC770_MAX_MSG 4 /* max. number of messages handled in ISR */ + +#define CC770_ECHO_SKB_MAX 1 + +#define cc770_read_reg(priv, member) \ + priv->read_reg(priv, offsetof(struct cc770_regs, member)) + +#define cc770_write_reg(priv, member, value) \ + priv->write_reg(priv, offsetof(struct cc770_regs, member), value) + +/* + * Message objects and flags used by this driver + */ +#define CC770_OBJ_FLAG_RX 0x01 +#define CC770_OBJ_FLAG_RTR 0x02 +#define CC770_OBJ_FLAG_EFF 0x04 + +enum { + CC770_OBJ_RX0 = 0, /* for receiving normal messages */ + CC770_OBJ_RX1, /* for receiving normal messages */ + CC770_OBJ_RX_RTR0, /* for receiving remote transmission requests */ + CC770_OBJ_RX_RTR1, /* for receiving remote transmission requests */ + CC770_OBJ_TX, /* for sending messages */ + CC770_OBJ_MAX +}; + +#define obj2msgobj(o) (MSGOBJ_LAST - (o)) /* message object 11..15 */ + +/* + * CC770 private data structure + */ +struct cc770_priv { + struct can_priv can; /* must be the first member */ + struct sk_buff *echo_skb; + + /* the lower-layer is responsible for appropriate locking */ + u8 (*read_reg)(const struct cc770_priv *priv, int reg); + void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val); + void (*pre_irq)(const struct cc770_priv *priv); + void (*post_irq)(const struct cc770_priv *priv); + + void *priv; /* for board-specific data */ + struct net_device *dev; + + void __iomem *reg_base; /* ioremap'ed address to registers */ + unsigned long irq_flags; /* for request_irq() */ + + unsigned char obj_flags[CC770_OBJ_MAX]; + u8 control_normal_mode; /* Control register for normal mode */ + u8 cpu_interface; /* CPU interface register */ + u8 clkout; /* Clock out register */ + u8 bus_config; /* Bus conffiguration register */ +}; + +struct net_device *alloc_cc770dev(int sizeof_priv); +void free_cc770dev(struct net_device *dev); +int register_cc770dev(struct net_device *dev); +void unregister_cc770dev(struct net_device *dev); + +#endif /* CC770_DEV_H */ diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c new file mode 100644 index 00000000000..4be5fe2c40a --- /dev/null +++ b/drivers/net/can/cc770/cc770_isa.c @@ -0,0 +1,367 @@ +/* + * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus + * + * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus. 
+ * The I/O port or memory address and the IRQ number must be specified via + * module parameters: + * + * insmod cc770_isa.ko port=0x310,0x380 irq=7,11 + * + * for ISA devices using I/O ports or: + * + * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 + * + * for memory mapped ISA devices. + * + * Indirect access via address and data port is supported as well: + * + * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11 + * + * Furthermore, the following mode parameter can be defined: + * + * clk: External oscillator clock frequency (default=16000000 [16 MHz]) + * cir: CPU interface register (default=0x40 [DSC]) + * bcr: Bus configuration register (default=0x40 [CBY]) + * cor: Clockout register (default=0x00) + * + * Note: for clk, cir, bcr and cor, the first argument re-defines the + * default for all other devices, e.g.: + * + * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000 + * + * is equivalent to + * + * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/platform/cc770.h> + +#include "cc770.h" + +#define MAXDEV 8 + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus"); +MODULE_LICENSE("GPL v2"); + +#define CLK_DEFAULT 16000000 /* 16 MHz */ +#define COR_DEFAULT 0x00 +#define BCR_DEFAULT BUSCFG_CBY + +static unsigned long port[MAXDEV]; +static unsigned long mem[MAXDEV]; +static int __devinitdata irq[MAXDEV]; +static int __devinitdata clk[MAXDEV]; +static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; +static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; +static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; +static int __devinitdata indirect[MAXDEV] = {[0 ... 
(MAXDEV - 1)] = -1}; + +module_param_array(port, ulong, NULL, S_IRUGO); +MODULE_PARM_DESC(port, "I/O port number"); + +module_param_array(mem, ulong, NULL, S_IRUGO); +MODULE_PARM_DESC(mem, "I/O memory address"); + +module_param_array(indirect, int, NULL, S_IRUGO); +MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); + +module_param_array(irq, int, NULL, S_IRUGO); +MODULE_PARM_DESC(irq, "IRQ number"); + +module_param_array(clk, int, NULL, S_IRUGO); +MODULE_PARM_DESC(clk, "External oscillator clock frequency " + "(default=16000000 [16 MHz])"); + +module_param_array(cir, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])"); + +module_param_array(cor, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(cor, "Clockout register (default=0x00)"); + +module_param_array(bcr, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])"); + +#define CC770_IOSIZE 0x20 +#define CC770_IOSIZE_INDIRECT 0x02 + +static struct platform_device *cc770_isa_devs[MAXDEV]; + +static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg) +{ + return readb(priv->reg_base + reg); +} + +static void cc770_isa_mem_write_reg(const struct cc770_priv *priv, + int reg, u8 val) +{ + writeb(val, priv->reg_base + reg); +} + +static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg) +{ + return inb((unsigned long)priv->reg_base + reg); +} + +static void cc770_isa_port_write_reg(const struct cc770_priv *priv, + int reg, u8 val) +{ + outb(val, (unsigned long)priv->reg_base + reg); +} + +static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv, + int reg) +{ + unsigned long base = (unsigned long)priv->reg_base; + + outb(reg, base); + return inb(base + 1); +} + +static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv, + int reg, u8 val) +{ + unsigned long base = (unsigned long)priv->reg_base; + + outb(reg, base); + outb(val, base + 1); +} + +static int __devinit cc770_isa_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct cc770_priv *priv; + void __iomem *base = NULL; + int iosize = CC770_IOSIZE; + int idx = pdev->id; + int err; + u32 clktmp; + + dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", + idx, port[idx], mem[idx], irq[idx]); + if (mem[idx]) { + if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) { + err = -EBUSY; + goto exit; + } + base = ioremap_nocache(mem[idx], iosize); + if (!base) { + err = -ENOMEM; + goto exit_release; + } + } else { + if (indirect[idx] > 0 || + (indirect[idx] == -1 && indirect[0] > 0)) + iosize = CC770_IOSIZE_INDIRECT; + if (!request_region(port[idx], iosize, KBUILD_MODNAME)) { + err = -EBUSY; + goto exit; + } + } + + dev = alloc_cc770dev(0); + if (!dev) { + err = -ENOMEM; + goto exit_unmap; + } + priv = netdev_priv(dev); + + dev->irq = irq[idx]; + priv->irq_flags = IRQF_SHARED; + if (mem[idx]) { + priv->reg_base = base; + dev->base_addr = mem[idx]; + priv->read_reg = cc770_isa_mem_read_reg; + priv->write_reg = cc770_isa_mem_write_reg; + } else { + priv->reg_base = (void __iomem *)port[idx]; + dev->base_addr = port[idx]; + + if (iosize == CC770_IOSIZE_INDIRECT) { + priv->read_reg = cc770_isa_port_read_reg_indirect; + priv->write_reg = cc770_isa_port_write_reg_indirect; + } else { + priv->read_reg = cc770_isa_port_read_reg; + priv->write_reg = cc770_isa_port_write_reg; + } + } + + if (clk[idx]) + clktmp = clk[idx]; + else if (clk[0]) + clktmp = clk[0]; + else + clktmp = CLK_DEFAULT; + priv->can.clock.freq = 
clktmp; + + if (cir[idx] != 0xff) { + priv->cpu_interface = cir[idx]; + } else if (cir[0] != 0xff) { + priv->cpu_interface = cir[0]; + } else { + /* The system clock may not exceed 10 MHz */ + if (clktmp > 10000000) { + priv->cpu_interface |= CPUIF_DSC; + clktmp /= 2; + } + /* The memory clock may not exceed 8 MHz */ + if (clktmp > 8000000) + priv->cpu_interface |= CPUIF_DMC; + } + + if (priv->cpu_interface & CPUIF_DSC) + priv->can.clock.freq /= 2; + + if (bcr[idx] != 0xff) + priv->bus_config = bcr[idx]; + else if (bcr[0] != 0xff) + priv->bus_config = bcr[0]; + else + priv->bus_config = BCR_DEFAULT; + + if (cor[idx] != 0xff) + priv->clkout = cor[idx]; + else if (cor[0] != 0xff) + priv->clkout = cor[0]; + else + priv->clkout = COR_DEFAULT; + + dev_set_drvdata(&pdev->dev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + err = register_cc770dev(dev); + if (err) { + dev_err(&pdev->dev, + "couldn't register device (err=%d)\n", err); + goto exit_unmap; + } + + dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", + priv->reg_base, dev->irq); + return 0; + + exit_unmap: + if (mem[idx]) + iounmap(base); + exit_release: + if (mem[idx]) + release_mem_region(mem[idx], iosize); + else + release_region(port[idx], iosize); + exit: + return err; +} + +static int __devexit cc770_isa_remove(struct platform_device *pdev) +{ + struct net_device *dev = dev_get_drvdata(&pdev->dev); + struct cc770_priv *priv = netdev_priv(dev); + int idx = pdev->id; + + unregister_cc770dev(dev); + dev_set_drvdata(&pdev->dev, NULL); + + if (mem[idx]) { + iounmap(priv->reg_base); + release_mem_region(mem[idx], CC770_IOSIZE); + } else { + if (priv->read_reg == cc770_isa_port_read_reg_indirect) + release_region(port[idx], CC770_IOSIZE_INDIRECT); + else + release_region(port[idx], CC770_IOSIZE); + } + free_cc770dev(dev); + + return 0; +} + +static struct platform_driver cc770_isa_driver = { + .probe = cc770_isa_probe, + .remove = __devexit_p(cc770_isa_remove), + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init cc770_isa_init(void) +{ + int idx, err; + + for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { + if ((port[idx] || mem[idx]) && irq[idx]) { + cc770_isa_devs[idx] = + platform_device_alloc(KBUILD_MODNAME, idx); + if (!cc770_isa_devs[idx]) { + err = -ENOMEM; + goto exit_free_devices; + } + err = platform_device_add(cc770_isa_devs[idx]); + if (err) { + platform_device_put(cc770_isa_devs[idx]); + goto exit_free_devices; + } + pr_debug("platform device %d: port=%#lx, mem=%#lx, " + "irq=%d\n", + idx, port[idx], mem[idx], irq[idx]); + } else if (idx == 0 || port[idx] || mem[idx]) { + pr_err("insufficient parameters supplied\n"); + err = -EINVAL; + goto exit_free_devices; + } + } + + err = platform_driver_register(&cc770_isa_driver); + if (err) + goto exit_free_devices; + + pr_info("driver for max. 
%d devices registered\n", MAXDEV); + + return 0; + +exit_free_devices: + while (--idx >= 0) { + if (cc770_isa_devs[idx]) + platform_device_unregister(cc770_isa_devs[idx]); + } + + return err; +} +module_init(cc770_isa_init); + +static void __exit cc770_isa_exit(void) +{ + int idx; + + platform_driver_unregister(&cc770_isa_driver); + for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { + if (cc770_isa_devs[idx]) + platform_device_unregister(cc770_isa_devs[idx]); + } +} +module_exit(cc770_isa_exit); diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c new file mode 100644 index 00000000000..53115eee807 --- /dev/null +++ b/drivers/net/can/cc770/cc770_platform.c @@ -0,0 +1,272 @@ +/* + * Driver for CC770 and AN82527 CAN controllers on the platform bus + * + * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * If platform data are used you should have similar definitions + * in your board-specific code: + * + * static struct cc770_platform_data myboard_cc770_pdata = { + * .osc_freq = 16000000, + * .cir = 0x41, + * .cor = 0x20, + * .bcr = 0x40, + * }; + * + * Please see include/linux/can/platform/cc770.h for description of + * above fields. + * + * If the device tree is used, you need a CAN node definition in your + * DTS file similar to: + * + * can@3,100 { + * compatible = "bosch,cc770"; + * reg = <3 0x100 0x80>; + * interrupts = <2 0>; + * interrupt-parent = <&mpic>; + * bosch,external-clock-frequency = <16000000>; + * }; + * + * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further + * information. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/platform/cc770.h> + +#include "cc770.h" + +#define DRV_NAME "cc770_platform" + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus"); +MODULE_LICENSE("GPL v2"); + +#define CC770_PLATFORM_CAN_CLOCK 16000000 + +static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg) +{ + return ioread8(priv->reg_base + reg); +} + +static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg, + u8 val) +{ + iowrite8(val, priv->reg_base + reg); +} + +static int __devinit cc770_get_of_node_data(struct platform_device *pdev, + struct cc770_priv *priv) +{ + struct device_node *np = pdev->dev.of_node; + const u32 *prop; + int prop_size; + u32 clkext; + + prop = of_get_property(np, "bosch,external-clock-frequency", + &prop_size); + if (prop && (prop_size == sizeof(u32))) + clkext = *prop; + else + clkext = CC770_PLATFORM_CAN_CLOCK; /* default */ + priv->can.clock.freq = clkext; + + /* The system clock may not exceed 10 MHz */ + if (priv->can.clock.freq > 10000000) { + priv->cpu_interface |= CPUIF_DSC; + priv->can.clock.freq /= 2; + } + + /* The memory clock may not exceed 8 MHz */ + if (priv->can.clock.freq > 8000000) + priv->cpu_interface |= CPUIF_DMC; + + if (of_get_property(np, "bosch,divide-memory-clock", NULL)) + priv->cpu_interface |= CPUIF_DMC; + if (of_get_property(np, "bosch,iso-low-speed-mux", NULL)) + priv->cpu_interface |= CPUIF_MUX; + + if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) + priv->bus_config |= BUSCFG_CBY; + if (of_get_property(np, "bosch,disconnect-rx0-input", NULL)) + priv->bus_config |= BUSCFG_DR0; + if (of_get_property(np, "bosch,disconnect-rx1-input", NULL)) + priv->bus_config |= BUSCFG_DR1; + if (of_get_property(np, "bosch,disconnect-tx1-output", NULL)) + priv->bus_config |= BUSCFG_DT1; + if (of_get_property(np, "bosch,polarity-dominant", NULL)) + priv->bus_config |= BUSCFG_POL; + + prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); + if (prop && (prop_size == sizeof(u32)) && *prop > 0) { + u32 cdv = clkext / *prop; + int slew; + + if (cdv > 0 && cdv < 16) { + priv->cpu_interface |= CPUIF_CEN; + priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK; + + prop = of_get_property(np, "bosch,slew-rate", + &prop_size); + if (prop && (prop_size == sizeof(u32))) { + slew = *prop; + } else { + /* Determine default slew rate */ + slew = (CLKOUT_SL_MASK >> + CLKOUT_SL_SHIFT) - + ((cdv * clkext - 1) / 8000000); + if (slew < 0) + slew = 0; + } + priv->clkout |= (slew << CLKOUT_SL_SHIFT) & + CLKOUT_SL_MASK; + } else { + dev_dbg(&pdev->dev, "invalid clock-out-frequency\n"); + } + } + + return 0; +} + +static int __devinit cc770_get_platform_data(struct platform_device *pdev, + struct cc770_priv *priv) +{ + + struct cc770_platform_data *pdata = pdev->dev.platform_data; + + priv->clkout = pdata->cor; + priv->bus_config = pdata->bcr; + priv->cpu_interface = pdata->cir; + + priv->can.clock.freq = pdata->osc_freq; + if (priv->cpu_interface & CPUIF_DSC) + priv->can.clock.freq /= 2; + + return 0; +} + +static int __devinit cc770_platform_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct cc770_priv *priv; + struct resource *mem; + resource_size_t mem_size; + void __iomem *base; + int 
err, irq; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!mem || irq <= 0) + return -ENODEV; + + mem_size = resource_size(mem); + if (!request_mem_region(mem->start, mem_size, pdev->name)) + return -EBUSY; + + base = ioremap(mem->start, mem_size); + if (!base) { + err = -ENOMEM; + goto exit_release_mem; + } + + dev = alloc_cc770dev(0); + if (!dev) { + err = -ENOMEM; + goto exit_unmap_mem; + } + + dev->irq = irq; + priv = netdev_priv(dev); + priv->read_reg = cc770_platform_read_reg; + priv->write_reg = cc770_platform_write_reg; + priv->irq_flags = IRQF_SHARED; + priv->reg_base = base; + + if (pdev->dev.of_node) + err = cc770_get_of_node_data(pdev, priv); + else if (pdev->dev.platform_data) + err = cc770_get_platform_data(pdev, priv); + else + err = -ENODEV; + if (err) + goto exit_free_cc770; + + dev_dbg(&pdev->dev, + "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x " + "bus_config=0x%02x clkout=0x%02x\n", + priv->reg_base, dev->irq, priv->can.clock.freq, + priv->cpu_interface, priv->bus_config, priv->clkout); + + dev_set_drvdata(&pdev->dev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + err = register_cc770dev(dev); + if (err) { + dev_err(&pdev->dev, + "couldn't register CC770 device (err=%d)\n", err); + goto exit_free_cc770; + } + + return 0; + +exit_free_cc770: + free_cc770dev(dev); +exit_unmap_mem: + iounmap(base); +exit_release_mem: + release_mem_region(mem->start, mem_size); + + return err; +} + +static int __devexit cc770_platform_remove(struct platform_device *pdev) +{ + struct net_device *dev = dev_get_drvdata(&pdev->dev); + struct cc770_priv *priv = netdev_priv(dev); + struct resource *mem; + + unregister_cc770dev(dev); + iounmap(priv->reg_base); + free_cc770dev(dev); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + + return 0; +} + +static struct of_device_id __devinitdata cc770_platform_table[] = { + {.compatible = "bosch,cc770"}, /* CC770 from Bosch */ + {.compatible = "intc,82527"}, /* AN82527 from Intel CP */ + {}, +}; + +static struct platform_driver cc770_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = cc770_platform_table, + }, + .probe = cc770_platform_probe, + .remove = __devexit_p(cc770_platform_remove), +}; + +module_platform_driver(cc770_platform_driver); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 25695bde054..120f1ab5a2c 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev) /* New-style flags. 
*/ dev->flags = IFF_NOARP; - dev->features = NETIF_F_NO_CSUM; + dev->features = NETIF_F_HW_CSUM; } struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index e02337953f4..165a4c79802 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1060,20 +1060,7 @@ static struct platform_driver flexcan_driver = { .remove = __devexit_p(flexcan_remove), }; -static int __init flexcan_init(void) -{ - pr_info("%s netdevice driver\n", DRV_NAME); - return platform_driver_register(&flexcan_driver); -} - -static void __exit flexcan_exit(void) -{ - platform_driver_unregister(&flexcan_driver); - pr_info("%s: driver removed\n", DRV_NAME); -} - -module_init(flexcan_init); -module_exit(flexcan_exit); +module_platform_driver(flexcan_driver); MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, " "Marc Kleine-Budde <kernel@pengutronix.de>"); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 32778d56d33..08c893cb789 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = { .remove = __devexit_p(ican3_remove), }; -static int __init ican3_init(void) -{ - return platform_driver_register(&ican3_driver); -} - -static void __exit ican3_exit(void) -{ - platform_driver_unregister(&ican3_driver); -} +module_platform_driver(ican3_driver); MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:janz-ican3"); - -module_init(ican3_init); -module_exit(ican3_exit); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 5fedc337556..5caa572d71e 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = { #endif }; -static int __init mpc5xxx_can_init(void) -{ - return platform_driver_register(&mpc5xxx_can_driver); -} -module_init(mpc5xxx_can_init); - -static void __exit mpc5xxx_can_exit(void) -{ - platform_driver_unregister(&mpc5xxx_can_driver); -}; -module_exit(mpc5xxx_can_exit); +module_platform_driver(mpc5xxx_can_driver); MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver"); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index ec4a3119e2c..1c82dd8b896 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev) priv->open_time = jiffies; - clrbits8(&regs->canctl1, MSCAN_LISTEN); + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + setbits8(&regs->canctl1, MSCAN_LISTEN); + else + clrbits8(&regs->canctl1, MSCAN_LISTEN); ret = mscan_start(dev); if (ret) @@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void) priv->can.bittiming_const = &mscan_bittiming_const; priv->can.do_set_bittiming = mscan_do_set_bittiming; priv->can.do_set_mode = mscan_do_set_mode; - priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_LISTENONLY; for (i = 0; i < TX_QUEUE_SIZE; i++) { priv->tx_queue[i].id = i; diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index fe9e64d476e..36e9d594069 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -6,7 +6,6 @@ if CAN_SJA1000 config CAN_SJA1000_ISA tristate 
"ISA Bus based legacy SJA1000 driver" - depends on ISA ---help--- This driver adds legacy support for SJA1000 chips connected to the ISA bus using I/O port, memory mapped or indirect access. diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index 496223e9e2f..90c5c2dfd2f 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -17,7 +17,7 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/isa.h> +#include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> @@ -44,9 +44,9 @@ static unsigned long port[MAXDEV]; static unsigned long mem[MAXDEV]; static int __devinitdata irq[MAXDEV]; static int __devinitdata clk[MAXDEV]; -static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; -static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; -static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; +static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; +static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; +static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; module_param_array(port, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); @@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number"); module_param_array(mem, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); -module_param_array(indirect, byte, NULL, S_IRUGO); +module_param_array(indirect, int, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_array(irq, int, NULL, S_IRUGO); @@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register " #define SJA1000_IOSIZE 0x20 #define SJA1000_IOSIZE_INDIRECT 0x02 +static struct platform_device *sja1000_isa_devs[MAXDEV]; + static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg) { return readb(priv->reg_base + reg); @@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, outb(val, base + 1); } -static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx) -{ - if (port[idx] || mem[idx]) { - if (irq[idx]) - return 1; - } else if (idx) - return 0; - - dev_err(pdev, "insufficient parameters supplied\n"); - return 0; -} - -static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx) +static int __devinit sja1000_isa_probe(struct platform_device *pdev) { struct net_device *dev; struct sja1000_priv *priv; void __iomem *base = NULL; int iosize = SJA1000_IOSIZE; + int idx = pdev->id; int err; + dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", + idx, port[idx], mem[idx], irq[idx]); + if (mem[idx]) { if (!request_mem_region(mem[idx], iosize, DRV_NAME)) { err = -EBUSY; @@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx) else priv->can.clock.freq = CLK_DEFAULT / 2; - if (ocr[idx] != -1) - priv->ocr = ocr[idx] & 0xff; - else if (ocr[0] != -1) - priv->ocr = ocr[0] & 0xff; + if (ocr[idx] != 0xff) + priv->ocr = ocr[idx]; + else if (ocr[0] != 0xff) + priv->ocr = ocr[0]; else priv->ocr = OCR_DEFAULT; - if (cdr[idx] != -1) - priv->cdr = cdr[idx] & 0xff; - else if (cdr[0] != -1) - priv->cdr = cdr[0] & 0xff; + if (cdr[idx] != 0xff) + priv->cdr = cdr[idx]; + else if (cdr[0] != 0xff) + priv->cdr = cdr[0]; else priv->cdr = CDR_DEFAULT; - dev_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, pdev); + 
dev_set_drvdata(&pdev->dev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); err = register_sja1000dev(dev); if (err) { - dev_err(pdev, "registering %s failed (err=%d)\n", + dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_unmap; } - dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n", + dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; @@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx) return err; } -static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx) +static int __devexit sja1000_isa_remove(struct platform_device *pdev) { - struct net_device *dev = dev_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(&pdev->dev); struct sja1000_priv *priv = netdev_priv(dev); + int idx = pdev->id; unregister_sja1000dev(dev); - dev_set_drvdata(pdev, NULL); + dev_set_drvdata(&pdev->dev, NULL); if (mem[idx]) { iounmap(priv->reg_base); @@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx) return 0; } -static struct isa_driver sja1000_isa_driver = { - .match = sja1000_isa_match, +static struct platform_driver sja1000_isa_driver = { .probe = sja1000_isa_probe, .remove = __devexit_p(sja1000_isa_remove), .driver = { .name = DRV_NAME, + .owner = THIS_MODULE, }, }; static int __init sja1000_isa_init(void) { - int err = isa_register_driver(&sja1000_isa_driver, MAXDEV); + int idx, err; + + for (idx = 0; idx < MAXDEV; idx++) { + if ((port[idx] || mem[idx]) && irq[idx]) { + sja1000_isa_devs[idx] = + platform_device_alloc(DRV_NAME, idx); + if (!sja1000_isa_devs[idx]) { + err = -ENOMEM; + goto exit_free_devices; + } + err = platform_device_add(sja1000_isa_devs[idx]); + if (err) { + platform_device_put(sja1000_isa_devs[idx]); + goto exit_free_devices; + } + pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, " + "irq=%d\n", + DRV_NAME, idx, port[idx], mem[idx], irq[idx]); + } else if (idx == 0 || port[idx] || mem[idx]) { + pr_err("%s: insufficient parameters supplied\n", + DRV_NAME); + err = -EINVAL; + goto exit_free_devices; + } + } + + err = platform_driver_register(&sja1000_isa_driver); + if (err) + goto exit_free_devices; + + pr_info("Legacy %s driver for max. %d devices registered\n", + DRV_NAME, MAXDEV); + + return 0; + +exit_free_devices: + while (--idx >= 0) { + if (sja1000_isa_devs[idx]) + platform_device_unregister(sja1000_isa_devs[idx]); + } - if (!err) - printk(KERN_INFO - "Legacy %s driver for max. 
%d devices registered\n", - DRV_NAME, MAXDEV); return err; } static void __exit sja1000_isa_exit(void) { - isa_unregister_driver(&sja1000_isa_driver); + int idx; + + platform_driver_unregister(&sja1000_isa_driver); + for (idx = 0; idx < MAXDEV; idx++) { + if (sja1000_isa_devs[idx]) + platform_device_unregister(sja1000_isa_devs[idx]); + } } module_init(sja1000_isa_init); diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index c3dd9d09be5..f2683eb6a3d 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = { .remove = __devexit_p(sja1000_ofp_remove), }; -static int __init sja1000_ofp_init(void) -{ - return platform_driver_register(&sja1000_ofp_driver); -} -module_init(sja1000_ofp_init); - -static void __exit sja1000_ofp_exit(void) -{ - return platform_driver_unregister(&sja1000_ofp_driver); -}; -module_exit(sja1000_ofp_exit); +module_platform_driver(sja1000_ofp_driver); diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index d9fadc489b3..4f50145f648 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -185,15 +185,4 @@ static struct platform_driver sp_driver = { }, }; -static int __init sp_init(void) -{ - return platform_driver_register(&sp_driver); -} - -static void __exit sp_exit(void) -{ - platform_driver_unregister(&sp_driver); -} - -module_init(sp_init); -module_exit(sp_exit); +module_platform_driver(sp_driver); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index a979b006f45..3f1ebcc2cb8 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev) /* New-style flags. 
*/ dev->flags = IFF_NOARP; - dev->features = NETIF_F_NO_CSUM; + dev->features = NETIF_F_HW_CSUM; } /****************************************** diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 09a8b86cf1a..a7c77c744ee 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -874,21 +874,9 @@ static struct platform_driver softing_driver = { .remove = __devexit_p(softing_pdev_remove), }; -MODULE_ALIAS("platform:softing"); - -static int __init softing_start(void) -{ - return platform_driver_register(&softing_driver); -} - -static void __exit softing_stop(void) -{ - platform_driver_unregister(&softing_driver); -} - -module_init(softing_start); -module_exit(softing_stop); +module_platform_driver(softing_driver); +MODULE_ALIAS("platform:softing"); MODULE_DESCRIPTION("Softing DPRAM CAN driver"); MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 2adc294f512..df809e3f130 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -1037,20 +1037,7 @@ static struct platform_driver ti_hecc_driver = { .resume = ti_hecc_resume, }; -static int __init ti_hecc_init_driver(void) -{ - printk(KERN_INFO DRV_DESC "\n"); - return platform_driver_register(&ti_hecc_driver); -} - -static void __exit ti_hecc_exit_driver(void) -{ - printk(KERN_INFO DRV_DESC " unloaded\n"); - platform_driver_unregister(&ti_hecc_driver); -} - -module_exit(ti_hecc_exit_driver); -module_init(ti_hecc_init_driver); +module_platform_driver(ti_hecc_driver); MODULE_AUTHOR("Anant Gole <anantgole@ti.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index f93e2d6fc88..ea2d9428593 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); * See Documentation/networking/can.txt for details. */ -static int echo; /* echo testing. Default: 0 (Off) */ +static bool echo; /* echo testing. Default: 0 (Off) */ module_param(echo, bool, S_IRUGO); MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)"); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig new file mode 100644 index 00000000000..dd151d53d50 --- /dev/null +++ b/drivers/net/dsa/Kconfig @@ -0,0 +1,36 @@ +menu "Distributed Switch Architecture drivers" + depends on NET_DSA + +config NET_DSA_MV88E6XXX + tristate + default n + +config NET_DSA_MV88E6060 + tristate "Marvell 88E6060 ethernet switch chip support" + select NET_DSA_TAG_TRAILER + ---help--- + This enables support for the Marvell 88E6060 ethernet switch + chip. + +config NET_DSA_MV88E6XXX_NEED_PPU + bool + default n + +config NET_DSA_MV88E6131 + tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" + select NET_DSA_MV88E6XXX + select NET_DSA_MV88E6XXX_NEED_PPU + select NET_DSA_TAG_DSA + ---help--- + This enables support for the Marvell 88E6085/6095/6095F/6131 + ethernet switch chips. + +config NET_DSA_MV88E6123_61_65 + tristate "Marvell 88E6123/6161/6165 ethernet switch chip support" + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6123/6161/6165 + ethernet switch chips. 
+ +endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile new file mode 100644 index 00000000000..f3bda05536c --- /dev/null +++ b/drivers/net/dsa/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o +obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o +mv88e6xxx_drv-y += mv88e6xxx.o +ifdef CONFIG_NET_DSA_MV88E6123_61_65 +mv88e6xxx_drv-y += mv88e6123_61_65.o +endif +ifdef CONFIG_NET_DSA_MV88E6131 +mv88e6xxx_drv-y += mv88e6131.o +endif diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c new file mode 100644 index 00000000000..7fc4e81d4d4 --- /dev/null +++ b/drivers/net/dsa/mv88e6060.c @@ -0,0 +1,293 @@ +/* + * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <net/dsa.h> + +#define REG_PORT(p) (8 + (p)) +#define REG_GLOBAL 0x0f + +static int reg_read(struct dsa_switch *ds, int addr, int reg) +{ + return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg); +} + +#define REG_READ(addr, reg) \ + ({ \ + int __ret; \ + \ + __ret = reg_read(ds, addr, reg); \ + if (__ret < 0) \ + return __ret; \ + __ret; \ + }) + + +static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) +{ + return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr, + reg, val); +} + +#define REG_WRITE(addr, reg, val) \ + ({ \ + int __ret; \ + \ + __ret = reg_write(ds, addr, reg, val); \ + if (__ret < 0) \ + return __ret; \ + }) + +static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr) +{ + int ret; + + ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); + if (ret >= 0) { + ret &= 0xfff0; + if (ret == 0x0600) + return "Marvell 88E6060"; + } + + return NULL; +} + +static int mv88e6060_switch_reset(struct dsa_switch *ds) +{ + int i; + int ret; + + /* + * Set all ports to the disabled state. + */ + for (i = 0; i < 6; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* + * Wait for transmit queues to drain. + */ + msleep(2); + + /* + * Reset the switch. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); + + /* + * Wait up to one second for reset to complete. + */ + for (i = 0; i < 1000; i++) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0x8000) == 0x0000) + break; + + msleep(1); + } + if (i == 1000) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6060_setup_global(struct dsa_switch *ds) +{ + /* + * Disable discarding of frames with excessive collisions, + * set the maximum frame size to 1536 bytes, and mask all + * interrupt sources. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x0800); + + /* + * Enable automatic address learning, set the address + * database size to 1024 entries, and set the default aging + * time to 5 minutes. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x2130); + + return 0; +} + +static int mv88e6060_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + + /* + * Do not force flow control, disable Ingress and Egress + * Header tagging, disable VLAN tunneling, and set the port + * state to Forwarding. Additionally, if this is the CPU + * port, enable Ingress and Egress Trailer tagging mode. + */ + REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 
0x4103 : 0x0003); + + /* + * Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the CPU port. + */ + REG_WRITE(addr, 0x06, + ((p & 0xf) << 12) | + (dsa_is_cpu_port(ds, p) ? + ds->phys_port_mask : + (1 << ds->dst->cpu_port))); + + /* + * Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + return 0; +} + +static int mv88e6060_setup(struct dsa_switch *ds) +{ + int i; + int ret; + + ret = mv88e6060_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise atu */ + + ret = mv88e6060_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 6; i++) { + ret = mv88e6060_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) +{ + REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); + REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); + REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); + + return 0; +} + +static int mv88e6060_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 5) + return port; + return -1; +} + +static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + int addr; + + addr = mv88e6060_port_to_phy_addr(port); + if (addr == -1) + return 0xffff; + + return reg_read(ds, addr, regnum); +} + +static int +mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +{ + int addr; + + addr = mv88e6060_port_to_phy_addr(port); + if (addr == -1) + return 0xffff; + + return reg_write(ds, addr, regnum, val); +} + +static void mv88e6060_poll_link(struct dsa_switch *ds) +{ + int i; + + for (i = 0; i < DSA_MAX_PORTS; i++) { + struct net_device *dev; + int uninitialized_var(port_status); + int link; + int speed; + int duplex; + int fc; + + dev = ds->ports[i]; + if (dev == NULL) + continue; + + link = 0; + if (dev->flags & IFF_UP) { + port_status = reg_read(ds, REG_PORT(i), 0x00); + if (port_status < 0) + continue; + + link = !!(port_status & 0x1000); + } + + if (!link) { + if (netif_carrier_ok(dev)) { + printk(KERN_INFO "%s: link down\n", dev->name); + netif_carrier_off(dev); + } + continue; + } + + speed = (port_status & 0x0100) ? 100 : 10; + duplex = (port_status & 0x0200) ? 1 : 0; + fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0; + + if (!netif_carrier_ok(dev)) { + printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " + "flow control %sabled\n", dev->name, + speed, duplex ? "full" : "half", + fc ? 
"en" : "dis"); + netif_carrier_on(dev); + } + } +} + +static struct dsa_switch_driver mv88e6060_switch_driver = { + .tag_protocol = htons(ETH_P_TRAILER), + .probe = mv88e6060_probe, + .setup = mv88e6060_setup, + .set_addr = mv88e6060_set_addr, + .phy_read = mv88e6060_phy_read, + .phy_write = mv88e6060_phy_write, + .poll_link = mv88e6060_poll_link, +}; + +static int __init mv88e6060_init(void) +{ + register_switch_driver(&mv88e6060_switch_driver); + return 0; +} +module_init(mv88e6060_init); + +static void __exit mv88e6060_cleanup(void) +{ + unregister_switch_driver(&mv88e6060_switch_driver); +} +module_exit(mv88e6060_cleanup); + +MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); +MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mv88e6060"); diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c new file mode 100644 index 00000000000..c0a458fc698 --- /dev/null +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -0,0 +1,438 @@ +/* + * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include "mv88e6xxx.h" + +static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) +{ + int ret; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + ret &= 0xfff0; + if (ret == 0x1210) + return "Marvell 88E6123"; + if (ret == 0x1610) + return "Marvell 88E6161"; + if (ret == 0x1650) + return "Marvell 88E6165"; + } + + return NULL; +} + +static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds) +{ + int i; + int ret; + + /* + * Set all ports to the disabled state. + */ + for (i = 0; i < 8; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* + * Wait for transmit queues to drain. + */ + msleep(2); + + /* + * Reset the switch. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + + /* + * Wait up to one second for reset to complete. + */ + for (i = 0; i < 1000; i++) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0xc800) == 0xc800) + break; + + msleep(1); + } + if (i == 1000) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* + * Disable the PHY polling unit (since there won't be any + * external PHYs to poll), don't discard packets with + * excessive collisions, and mask all interrupt sources. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x0000); + + /* + * Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* + * Configure the priority mapping registers. + */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* + * Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. + */ + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); + + /* + * Disable remote management for now, and set the switch's + * DSA device number. 
+ */ + REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); + + /* + * Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); + + /* + * Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* + * Disable the loopback filter, disable flow control + * messages, disable flood broadcast override, disable + * removing of provider tags, disable ATU age violation + * interrupts, disable tag flow control, force flow + * control priority to the highest, and send all special + * multicast frames to the CPU at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* + * Program the DSA routing table. + */ + for (i = 0; i < 32; i++) { + int nexthop; + + nexthop = 0x1f; + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* + * Clear all trunk masks. + */ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); + + /* + * Clear all trunk mappings. + */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* + * Disable ingress rate limiting by resetting all ingress + * rate limit registers to their initial state. + */ + for (i = 0; i < 6; i++) + REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); + + /* + * Initialise cross-chip port VLAN table to reset defaults. + */ + REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); + + /* + * Clear the priority override table. + */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); + + /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ + + return 0; +} + +static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + u16 val; + + /* + * MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * full duplex. + */ + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + REG_WRITE(addr, 0x01, 0x003e); + else + REG_WRITE(addr, 0x01, 0x0003); + + /* + * Do not limit the period of time that this port can be + * paused for by the remote end or the period of time that + * this port can pause the remote end. + */ + REG_WRITE(addr, 0x02, 0x0000); + + /* + * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + val = 0x0433; + if (dsa_is_cpu_port(ds, p)) { + if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) + val |= 0x3300; + else + val |= 0x0100; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + if (p == dsa_upstream_port(ds)) + val |= 0x000c; + REG_WRITE(addr, 0x04, val); + + /* + * Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 
0x8000 : 0x0000); + + /* + * Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* + * Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + REG_WRITE(addr, 0x07, 0x0000); + + /* + * Port Control 2: don't force a good FCS, set the maximum + * frame size to 10240 bytes, don't let the switch add or + * strip 802.1q tags, don't discard tagged or untagged frames + * on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't + * send a copy of all transmitted/received frames on this port + * to the CPU. + */ + REG_WRITE(addr, 0x08, 0x2080); + + /* + * Egress rate control: disable egress rate control. + */ + REG_WRITE(addr, 0x09, 0x0001); + + /* + * Egress rate control 2: disable egress rate control. + */ + REG_WRITE(addr, 0x0a, 0x0000); + + /* + * Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* + * Port ATU control: disable limiting the number of address + * database entries that this port is allowed to use. + */ + REG_WRITE(addr, 0x0c, 0x0000); + + /* + * Priority Override: disable DA, SA and VTU priority override. + */ + REG_WRITE(addr, 0x0d, 0x0000); + + /* + * Port Ethertype: use the Ethertype DSA Ethertype value. + */ + REG_WRITE(addr, 0x0f, ETH_P_EDSA); + + /* + * Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* + * Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. 
+ */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +static int mv88e6123_61_65_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int i; + int ret; + + mutex_init(&ps->smi_mutex); + mutex_init(&ps->stats_mutex); + + ret = mv88e6123_61_65_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6123_61_65_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 6; i++) { + ret = mv88e6123_61_65_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6123_61_65_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 4) + return port; + return -1; +} + +static int +mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + int addr = mv88e6123_61_65_port_to_phy_addr(port); + return mv88e6xxx_phy_read(ds, addr, regnum); +} + +static int +mv88e6123_61_65_phy_write(struct dsa_switch *ds, + int port, int regnum, u16 val) +{ + int addr = mv88e6123_61_65_port_to_phy_addr(port); + return mv88e6xxx_phy_write(ds, addr, regnum, val); +} + +static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + { "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { "hist_1024_max_bytes", 4, 0x0d, }, +}; + +static void +mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats), + mv88e6123_61_65_hw_stats, port, data); +} + +static void +mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds, + int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats), + mv88e6123_61_65_hw_stats, port, data); +} + +static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6123_61_65_hw_stats); +} + +struct dsa_switch_driver mv88e6123_61_65_switch_driver = { + .tag_protocol = cpu_to_be16(ETH_P_EDSA), + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6123_61_65_probe, + .setup = mv88e6123_61_65_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6123_61_65_phy_read, + .phy_write = mv88e6123_61_65_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6123_61_65_get_strings, + .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, + .get_sset_count = mv88e6123_61_65_get_sset_count, +}; + +MODULE_ALIAS("platform:mv88e6123"); +MODULE_ALIAS("platform:mv88e6161"); +MODULE_ALIAS("platform:mv88e6165"); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c new file mode 100644 index 00000000000..e0eb6824383 --- /dev/null +++ b/drivers/net/dsa/mv88e6131.c @@ -0,0 +1,435 @@ +/* + * net/dsa/mv88e6131.c - Marvell 
88e6095/6095f/6131 switch chip support + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include "mv88e6xxx.h" + +/* + * Switch product IDs + */ +#define ID_6085 0x04a0 +#define ID_6095 0x0950 +#define ID_6131 0x1060 + +static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) +{ + int ret; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + ret &= 0xfff0; + if (ret == ID_6085) + return "Marvell 88E6085"; + if (ret == ID_6095) + return "Marvell 88E6095/88E6095F"; + if (ret == ID_6131) + return "Marvell 88E6131"; + } + + return NULL; +} + +static int mv88e6131_switch_reset(struct dsa_switch *ds) +{ + int i; + int ret; + + /* + * Set all ports to the disabled state. + */ + for (i = 0; i < 11; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* + * Wait for transmit queues to drain. + */ + msleep(2); + + /* + * Reset the switch. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + + /* + * Wait up to one second for reset to complete. + */ + for (i = 0; i < 1000; i++) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0xc800) == 0xc800) + break; + + msleep(1); + } + if (i == 1000) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6131_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* + * Enable the PHY polling unit, don't discard packets with + * excessive collisions, use a weighted fair queueing scheme + * to arbitrate between packet queues, set the maximum frame + * size to 1632, and mask all interrupt sources. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x4400); + + /* + * Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* + * Configure the priority mapping registers. + */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* + * Set the VLAN ethertype to 0x8100. + */ + REG_WRITE(REG_GLOBAL, 0x19, 0x8100); + + /* + * Disable ARP mirroring, and configure the upstream port as + * the port to which ingress and egress monitor frames are to + * be sent. + */ + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0); + + /* + * Disable cascade port functionality unless this device + * is used in a cascade configuration, and set the switch's + * DSA device number. + */ + if (ds->dst->pd->nr_chips > 1) + REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f)); + else + REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f)); + + /* + * Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* + * Ignore removed tag data on doubly tagged packets, disable + * flow control messages, force flow control priority to the + * highest, and send all special multicast frames to the CPU + * port at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* + * Program the DSA routing table. 
+ */ + for (i = 0; i < 32; i++) { + int nexthop; + + nexthop = 0x1f; + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* + * Clear all trunk masks. + */ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff); + + /* + * Clear all trunk mappings. + */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* + * Force the priority of IGMP/MLD snoop frames and ARP frames + * to the highest setting. + */ + REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff); + + return 0; +} + +static int mv88e6131_setup_port(struct dsa_switch *ds, int p) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int addr = REG_PORT(p); + u16 val; + + /* + * MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * (100 Mb/s on 6085) full duplex. + */ + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + if (ps->id == ID_6085) + REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */ + else + REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */ + else + REG_WRITE(addr, 0x01, 0x0003); + + /* + * Port Control: disable Core Tag, disable Drop-on-Lock, + * transmit frames unmodified, disable Header mode, + * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN + * tunneling, determine priority by looking at 802.1p and + * IP priority fields (IP prio has precedence), and set STP + * state to Forwarding. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts, and enable DSA tagging + * mode. + * + * If this is the link to another switch, use DSA tagging + * mode, but do not enable forwarding of unknown unicasts. + */ + val = 0x0433; + if (p == dsa_upstream_port(ds)) { + val |= 0x0104; + /* + * On 6085, unknown multicast forward is controlled + * here rather than in Port Control 2 register. + */ + if (ps->id == ID_6085) + val |= 0x0008; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + REG_WRITE(addr, 0x04, val); + + /* + * Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); + + /* + * Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* + * Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + REG_WRITE(addr, 0x07, 0x0000); + + /* + * Port Control 2: don't force a good FCS, don't use + * VLAN-based, source address-based or destination + * address-based priority overrides, don't let the switch + * add or strip 802.1q tags, don't discard tagged or + * untagged frames on this port, do a destination address + * lookup on received packets as usual, don't send a copy + * of all transmitted/received frames on this port to the + * CPU, and configure the upstream port number. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown multicast addresses. 
+ */ + if (ps->id == ID_6085) + /* + * on 6085, bits 3:0 are reserved, bit 6 control ARP + * mirroring, and multicast forward is handled in + * Port Control register. + */ + REG_WRITE(addr, 0x08, 0x0080); + else { + val = 0x0080 | dsa_upstream_port(ds); + if (p == dsa_upstream_port(ds)) + val |= 0x0040; + REG_WRITE(addr, 0x08, val); + } + + /* + * Rate Control: disable ingress rate limiting. + */ + REG_WRITE(addr, 0x09, 0x0000); + + /* + * Rate Control 2: disable egress rate limiting. + */ + REG_WRITE(addr, 0x0a, 0x0000); + + /* + * Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* + * Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* + * Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +static int mv88e6131_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int i; + int ret; + + mutex_init(&ps->smi_mutex); + mv88e6xxx_ppu_state_init(ds); + mutex_init(&ps->stats_mutex); + + ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; + + ret = mv88e6131_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6131_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 11; i++) { + ret = mv88e6131_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6131_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 11) + return port; + return -1; +} + +static int +mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + int addr = mv88e6131_port_to_phy_addr(port); + return mv88e6xxx_phy_read_ppu(ds, addr, regnum); +} + +static int +mv88e6131_phy_write(struct dsa_switch *ds, + int port, int regnum, u16 val) +{ + int addr = mv88e6131_port_to_phy_addr(port); + return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val); +} + +static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + { "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { "hist_1024_max_bytes", 4, 0x0d, }, +}; + +static void +mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats), + mv88e6131_hw_stats, port, data); +} + +static void +mv88e6131_get_ethtool_stats(struct dsa_switch *ds, + int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats), + mv88e6131_hw_stats, port, data); +} + +static int mv88e6131_get_sset_count(struct 
dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6131_hw_stats); +} + +struct dsa_switch_driver mv88e6131_switch_driver = { + .tag_protocol = cpu_to_be16(ETH_P_DSA), + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6131_probe, + .setup = mv88e6131_setup, + .set_addr = mv88e6xxx_set_addr_direct, + .phy_read = mv88e6131_phy_read, + .phy_write = mv88e6131_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6131_get_strings, + .get_ethtool_stats = mv88e6131_get_ethtool_stats, + .get_sset_count = mv88e6131_get_sset_count, +}; + +MODULE_ALIAS("platform:mv88e6085"); +MODULE_ALIAS("platform:mv88e6095"); +MODULE_ALIAS("platform:mv88e6095f"); +MODULE_ALIAS("platform:mv88e6131"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c new file mode 100644 index 00000000000..5467c040824 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx.c @@ -0,0 +1,549 @@ +/* + * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support + * Copyright (c) 2008 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include "mv88e6xxx.h" + +/* + * If the switch's ADDR[4:0] strap pins are strapped to zero, it will + * use all 32 SMI bus addresses on its SMI bus, and all switch registers + * will be directly accessible on some {device address,register address} + * pair. If the ADDR[4:0] pins are not strapped to zero, the switch + * will only respond to SMI transactions to that specific address, and + * an indirect addressing mechanism needs to be used to access its + * registers. + */ +static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr) +{ + int ret; + int i; + + for (i = 0; i < 16; i++) { + ret = mdiobus_read(bus, sw_addr, 0); + if (ret < 0) + return ret; + + if ((ret & 0x8000) == 0) + return 0; + } + + return -ETIMEDOUT; +} + +int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) +{ + int ret; + + if (sw_addr == 0) + return mdiobus_read(bus, addr, reg); + + /* + * Wait for the bus to become free. + */ + ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); + if (ret < 0) + return ret; + + /* + * Transmit the read command. + */ + ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg); + if (ret < 0) + return ret; + + /* + * Wait for the read command to complete. + */ + ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); + if (ret < 0) + return ret; + + /* + * Read the data. + */ + ret = mdiobus_read(bus, sw_addr, 1); + if (ret < 0) + return ret; + + return ret & 0xffff; +} + +int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int ret; + + mutex_lock(&ps->smi_mutex); + ret = __mv88e6xxx_reg_read(ds->master_mii_bus, + ds->pd->sw_addr, addr, reg); + mutex_unlock(&ps->smi_mutex); + + return ret; +} + +int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, + int reg, u16 val) +{ + int ret; + + if (sw_addr == 0) + return mdiobus_write(bus, addr, reg, val); + + /* + * Wait for the bus to become free. + */ + ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); + if (ret < 0) + return ret; + + /* + * Transmit the data to write. 
+ */ + ret = mdiobus_write(bus, sw_addr, 1, val); + if (ret < 0) + return ret; + + /* + * Transmit the write command. + */ + ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg); + if (ret < 0) + return ret; + + /* + * Wait for the write command to complete. + */ + ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); + if (ret < 0) + return ret; + + return 0; +} + +int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int ret; + + mutex_lock(&ps->smi_mutex); + ret = __mv88e6xxx_reg_write(ds->master_mii_bus, + ds->pd->sw_addr, addr, reg, val); + mutex_unlock(&ps->smi_mutex); + + return ret; +} + +int mv88e6xxx_config_prio(struct dsa_switch *ds) +{ + /* + * Configure the IP ToS mapping registers. + */ + REG_WRITE(REG_GLOBAL, 0x10, 0x0000); + REG_WRITE(REG_GLOBAL, 0x11, 0x0000); + REG_WRITE(REG_GLOBAL, 0x12, 0x5555); + REG_WRITE(REG_GLOBAL, 0x13, 0x5555); + REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa); + REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa); + REG_WRITE(REG_GLOBAL, 0x16, 0xffff); + REG_WRITE(REG_GLOBAL, 0x17, 0xffff); + + /* + * Configure the IEEE 802.1p priority mapping register. + */ + REG_WRITE(REG_GLOBAL, 0x18, 0xfa41); + + return 0; +} + +int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) +{ + REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); + REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); + REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); + + return 0; +} + +int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) +{ + int i; + int ret; + + for (i = 0; i < 6; i++) { + int j; + + /* + * Write the MAC address byte. + */ + REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]); + + /* + * Wait for the write to complete. + */ + for (j = 0; j < 16; j++) { + ret = REG_READ(REG_GLOBAL2, 0x0d); + if ((ret & 0x8000) == 0) + break; + } + if (j == 16) + return -ETIMEDOUT; + } + + return 0; +} + +int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum) +{ + if (addr >= 0) + return mv88e6xxx_reg_read(ds, addr, regnum); + return 0xffff; +} + +int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val) +{ + if (addr >= 0) + return mv88e6xxx_reg_write(ds, addr, regnum, val); + return 0; +} + +#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU +static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) +{ + int ret; + int i; + + ret = REG_READ(REG_GLOBAL, 0x04); + REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000); + + for (i = 0; i < 1000; i++) { + ret = REG_READ(REG_GLOBAL, 0x00); + msleep(1); + if ((ret & 0xc000) != 0xc000) + return 0; + } + + return -ETIMEDOUT; +} + +static int mv88e6xxx_ppu_enable(struct dsa_switch *ds) +{ + int ret; + int i; + + ret = REG_READ(REG_GLOBAL, 0x04); + REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000); + + for (i = 0; i < 1000; i++) { + ret = REG_READ(REG_GLOBAL, 0x00); + msleep(1); + if ((ret & 0xc000) == 0xc000) + return 0; + } + + return -ETIMEDOUT; +} + +static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) +{ + struct mv88e6xxx_priv_state *ps; + + ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); + if (mutex_trylock(&ps->ppu_mutex)) { + struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1; + + if (mv88e6xxx_ppu_enable(ds) == 0) + ps->ppu_disabled = 0; + mutex_unlock(&ps->ppu_mutex); + } +} + +static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) +{ + struct mv88e6xxx_priv_state *ps = (void *)_ps; + + schedule_work(&ps->ppu_work); +} + +static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) +{ + struct 
mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int ret; + + mutex_lock(&ps->ppu_mutex); + + /* + * If the PHY polling unit is enabled, disable it so that + * we can access the PHY registers. If it was already + * disabled, cancel the timer that is going to re-enable + * it. + */ + if (!ps->ppu_disabled) { + ret = mv88e6xxx_ppu_disable(ds); + if (ret < 0) { + mutex_unlock(&ps->ppu_mutex); + return ret; + } + ps->ppu_disabled = 1; + } else { + del_timer(&ps->ppu_timer); + ret = 0; + } + + return ret; +} + +static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + + /* + * Schedule a timer to re-enable the PHY polling unit. + */ + mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); + mutex_unlock(&ps->ppu_mutex); +} + +void mv88e6xxx_ppu_state_init(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + + mutex_init(&ps->ppu_mutex); + INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work); + init_timer(&ps->ppu_timer); + ps->ppu_timer.data = (unsigned long)ps; + ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; +} + +int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum) +{ + int ret; + + ret = mv88e6xxx_ppu_access_get(ds); + if (ret >= 0) { + ret = mv88e6xxx_reg_read(ds, addr, regnum); + mv88e6xxx_ppu_access_put(ds); + } + + return ret; +} + +int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, + int regnum, u16 val) +{ + int ret; + + ret = mv88e6xxx_ppu_access_get(ds); + if (ret >= 0) { + ret = mv88e6xxx_reg_write(ds, addr, regnum, val); + mv88e6xxx_ppu_access_put(ds); + } + + return ret; +} +#endif + +void mv88e6xxx_poll_link(struct dsa_switch *ds) +{ + int i; + + for (i = 0; i < DSA_MAX_PORTS; i++) { + struct net_device *dev; + int uninitialized_var(port_status); + int link; + int speed; + int duplex; + int fc; + + dev = ds->ports[i]; + if (dev == NULL) + continue; + + link = 0; + if (dev->flags & IFF_UP) { + port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00); + if (port_status < 0) + continue; + + link = !!(port_status & 0x0800); + } + + if (!link) { + if (netif_carrier_ok(dev)) { + printk(KERN_INFO "%s: link down\n", dev->name); + netif_carrier_off(dev); + } + continue; + } + + switch (port_status & 0x0300) { + case 0x0000: + speed = 10; + break; + case 0x0100: + speed = 100; + break; + case 0x0200: + speed = 1000; + break; + default: + speed = -1; + break; + } + duplex = (port_status & 0x0400) ? 1 : 0; + fc = (port_status & 0x8000) ? 1 : 0; + + if (!netif_carrier_ok(dev)) { + printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " + "flow control %sabled\n", dev->name, + speed, duplex ? "full" : "half", + fc ? "en" : "dis"); + netif_carrier_on(dev); + } + } +} + +static int mv88e6xxx_stats_wait(struct dsa_switch *ds) +{ + int ret; + int i; + + for (i = 0; i < 10; i++) { + ret = REG_READ(REG_GLOBAL, 0x1d); + if ((ret & 0x8000) == 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) +{ + int ret; + + /* + * Snapshot the hardware statistics counters for this port. + */ + REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port); + + /* + * Wait for the snapshotting to complete. 
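
[Editorial aside, not part of the patch: in mv88e6xxx_poll_link() above, link state, speed, duplex and flow control are all fields of one 16-bit port status word. The standalone sketch below decodes the same bit fields (0x0800, 0x0300, 0x0400, 0x8000 as used above); the status value fed to it is invented for the example.]

#include <stdio.h>
#include <stdint.h>

struct link_state {
	int link;	/* 0x0800: link up              */
	int speed;	/* 0x0300: 10/100/1000 Mb/s     */
	int duplex;	/* 0x0400: full duplex          */
	int fc;		/* 0x8000: flow control enabled */
};

static struct link_state decode_port_status(uint16_t status)
{
	struct link_state s;

	s.link   = !!(status & 0x0800);
	s.duplex = !!(status & 0x0400);
	s.fc     = !!(status & 0x8000);

	switch (status & 0x0300) {
	case 0x0000: s.speed = 10;   break;
	case 0x0100: s.speed = 100;  break;
	case 0x0200: s.speed = 1000; break;
	default:     s.speed = -1;   break;
	}
	return s;
}

int main(void)
{
	/* 0x8e00: link up, 1000 Mb/s, full duplex, flow control on (made-up value) */
	struct link_state s = decode_port_status(0x8e00);

	printf("link %d, %d Mb/s, %s duplex, flow control %sabled\n",
	       s.link, s.speed, s.duplex ? "full" : "half", s.fc ? "en" : "dis");
	return 0;
}

[End of aside; the patch continues below.]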
+ */ + ret = mv88e6xxx_stats_wait(ds); + if (ret < 0) + return ret; + + return 0; +} + +static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val) +{ + u32 _val; + int ret; + + *val = 0; + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat); + if (ret < 0) + return; + + ret = mv88e6xxx_stats_wait(ds); + if (ret < 0) + return; + + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e); + if (ret < 0) + return; + + _val = ret << 16; + + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f); + if (ret < 0) + return; + + *val = _val | ret; +} + +void mv88e6xxx_get_strings(struct dsa_switch *ds, + int nr_stats, struct mv88e6xxx_hw_stat *stats, + int port, uint8_t *data) +{ + int i; + + for (i = 0; i < nr_stats; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + stats[i].string, ETH_GSTRING_LEN); + } +} + +void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, + int nr_stats, struct mv88e6xxx_hw_stat *stats, + int port, uint64_t *data) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int ret; + int i; + + mutex_lock(&ps->stats_mutex); + + ret = mv88e6xxx_stats_snapshot(ds, port); + if (ret < 0) { + mutex_unlock(&ps->stats_mutex); + return; + } + + /* + * Read each of the counters. + */ + for (i = 0; i < nr_stats; i++) { + struct mv88e6xxx_hw_stat *s = stats + i; + u32 low; + u32 high; + + mv88e6xxx_stats_read(ds, s->reg, &low); + if (s->sizeof_stat == 8) + mv88e6xxx_stats_read(ds, s->reg + 1, &high); + else + high = 0; + + data[i] = (((u64)high) << 32) | low; + } + + mutex_unlock(&ps->stats_mutex); +} + +static int __init mv88e6xxx_init(void) +{ +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) + register_switch_driver(&mv88e6131_switch_driver); +#endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) + register_switch_driver(&mv88e6123_61_65_switch_driver); +#endif + return 0; +} +module_init(mv88e6xxx_init); + +static void __exit mv88e6xxx_cleanup(void) +{ +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) + unregister_switch_driver(&mv88e6123_61_65_switch_driver); +#endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) + unregister_switch_driver(&mv88e6131_switch_driver); +#endif +} +module_exit(mv88e6xxx_cleanup); + +MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); +MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h new file mode 100644 index 00000000000..fc2cd7b90e8 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx.h @@ -0,0 +1,98 @@ +/* + * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support + * Copyright (c) 2008 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __MV88E6XXX_H +#define __MV88E6XXX_H + +#define REG_PORT(p) (0x10 + (p)) +#define REG_GLOBAL 0x1b +#define REG_GLOBAL2 0x1c + +struct mv88e6xxx_priv_state { + /* + * When using multi-chip addressing, this mutex protects + * access to the indirect access registers. (In single-chip + * mode, this mutex is effectively useless.) + */ + struct mutex smi_mutex; + +#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU + /* + * Handles automatic disabling and re-enabling of the PHY + * polling unit. 
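
[Editorial aside, not part of the patch: in the statistics path above, each hardware counter is fetched as two 16-bit register reads that form one 32-bit value, and a 64-bit ethtool counter is then assembled from a low and a high 32-bit read. A minimal sketch of that assembly follows; the register reads are stubbed with arbitrary values.]

#include <stdio.h>
#include <stdint.h>

/* Stubs standing in for the 0x1e (high word) and 0x1f (low word) snapshot
 * registers of a given counter.  Values are made up.
 */
static uint16_t read_stats_reg_hi(int counter) { return 0x0001; }
static uint16_t read_stats_reg_lo(int counter) { return 0x86a0; }

/* One counter read: two 16-bit registers combined into a 32-bit value,
 * as mv88e6xxx_stats_read() does above.
 */
static uint32_t stats_read32(int counter)
{
	uint32_t val = (uint32_t)read_stats_reg_hi(counter) << 16;

	return val | read_stats_reg_lo(counter);
}

int main(void)
{
	/* A 64-bit ethtool counter is two adjacent 32-bit counters:
	 * low word at reg, high word at reg + 1.
	 */
	uint32_t low  = stats_read32(0);
	uint32_t high = stats_read32(1);
	uint64_t stat = ((uint64_t)high << 32) | low;

	printf("counter = %llu\n", (unsigned long long)stat);
	return 0;
}

[End of aside; the patch continues below.]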
+ */ + struct mutex ppu_mutex; + int ppu_disabled; + struct work_struct ppu_work; + struct timer_list ppu_timer; +#endif + + /* + * This mutex serialises access to the statistics unit. + * Hold this mutex over snapshot + dump sequences. + */ + struct mutex stats_mutex; + + int id; /* switch product id */ +}; + +struct mv88e6xxx_hw_stat { + char string[ETH_GSTRING_LEN]; + int sizeof_stat; + int reg; +}; + +int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg); +int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg); +int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, + int reg, u16 val); +int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val); +int mv88e6xxx_config_prio(struct dsa_switch *ds); +int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); +int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr); +int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum); +int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val); +void mv88e6xxx_ppu_state_init(struct dsa_switch *ds); +int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum); +int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, + int regnum, u16 val); +void mv88e6xxx_poll_link(struct dsa_switch *ds); +void mv88e6xxx_get_strings(struct dsa_switch *ds, + int nr_stats, struct mv88e6xxx_hw_stat *stats, + int port, uint8_t *data); +void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, + int nr_stats, struct mv88e6xxx_hw_stat *stats, + int port, uint64_t *data); + +extern struct dsa_switch_driver mv88e6131_switch_driver; +extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; + +#define REG_READ(addr, reg) \ + ({ \ + int __ret; \ + \ + __ret = mv88e6xxx_reg_read(ds, addr, reg); \ + if (__ret < 0) \ + return __ret; \ + __ret; \ + }) + +#define REG_WRITE(addr, reg, val) \ + ({ \ + int __ret; \ + \ + __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \ + if (__ret < 0) \ + return __ret; \ + }) + + + +#endif diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index a7c5e8831e8..087648ea1ed 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; - dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; random_ether_addr(dev->dev_addr); } diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 972f80ecc51..da410f03686 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), + "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index b42c06baba8..8153a3e0a1a 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2929,15 +2929,17 @@ static void 
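
[Editorial aside, not part of the patch: the REG_READ()/REG_WRITE() helpers added in mv88e6xxx.h above are GCC statement expressions, so a failed register access makes the calling function return the error code, which keeps the register setup paths short. Below is a hypothetical userspace illustration of the same pattern; it needs GCC or clang, since statement expressions are a GNU C extension, and the fake register read and error value are invented.]

#include <stdio.h>

/* Fallible "register read": fails for register 2 in this example. */
static int reg_read(int addr, int reg)
{
	return reg == 2 ? -5 /* -EIO */ : 0x1234 + reg;
}

/* Same shape as the driver's REG_READ(): evaluate to the value on success,
 * return the error from the enclosing function on failure.
 */
#define REG_READ(addr, reg)				\
	({						\
		int __ret = reg_read(addr, reg);	\
		if (__ret < 0)				\
			return __ret;			\
		__ret;					\
	})

static int setup(void)
{
	int a = REG_READ(0x1b, 0);	/* succeeds                              */
	int b = REG_READ(0x1b, 2);	/* fails: setup() returns -5 right here  */

	printf("a=0x%x b=0x%x\n", a, b);	/* never reached */
	return 0;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}

[End of aside; the patch continues below.]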
vortex_get_drvinfo(struct net_device *dev, { struct vortex_private *vp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); if (VORTEX_PCI(vp)) { - strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); + strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)), + sizeof(info->bus_info)); } else { if (VORTEX_EISA(vp)) - strcpy(info->bus_info, dev_name(vp->gendev)); + strlcpy(info->bus_info, dev_name(vp->gendev), + sizeof(info->bus_info)); else - sprintf(info->bus_info, "EISA 0x%lx %d", - dev->base_addr, dev->irq); + snprintf(info->bus_info, sizeof(info->bus_info), + "EISA 0x%lx %d", dev->base_addr, dev->irq); } } diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 20ea07508ac..6d6bc754b1a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) smp_rmb(); if(tp->card_state == Sleeping) { - strcpy(info->fw_version, "Sleep image"); + strlcpy(info->fw_version, "Sleep image", + sizeof(info->fw_version)); } else { INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { - strcpy(info->fw_version, "Unknown runtime"); + strlcpy(info->fw_version, "Unknown runtime", + sizeof(info->fw_version)); } else { u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); - snprintf(info->fw_version, 32, "%02x.%03x.%03x", - sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, - sleep_ver & 0xfff); + snprintf(info->fw_version, sizeof(info->fw_version), + "%02x.%03x.%03x", sleep_ver >> 24, + (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff); } } - strcpy(info->driver, KBUILD_MODNAME); - strcpy(info->bus_info, pci_name(pci_dev)); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); } static int diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h index 58a12e4c78f..ef325ffa1b5 100644 --- a/drivers/net/ethernet/8390/8390.h +++ b/drivers/net/ethernet/8390/8390.h @@ -14,8 +14,6 @@ #define TX_PAGES 12 /* Two Tx slots */ -#define ETHER_ADDR_LEN 6 - /* The 8390 specific per-packet-header format. 
*/ struct e8390_pkt_hdr { unsigned char status; /* status */ diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index 547737340cb..3ad5d2f9a49 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for(i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index e9f8432f55b..9e8ba4f5636 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -735,15 +735,14 @@ static int ax_init_dev(struct net_device *dev) if (ax->plat->flags & AXFLG_MAC_FROMDEV) { ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ei_local->mem + E8390_CMD); /* 0x61 */ - for (i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); } if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && ax->plat->mac_addr) - memcpy(dev->dev_addr, ax->plat->mac_addr, - ETHER_ADDR_LEN); + memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN); ax_reset_8390(dev); @@ -991,18 +990,7 @@ static struct platform_driver axdrv = { .resume = ax_resume, }; -static int __init axdrv_init(void) -{ - return platform_driver_register(&axdrv); -} - -static void __exit axdrv_exit(void) -{ - platform_driver_unregister(&axdrv); -} - -module_init(axdrv_init); -module_exit(axdrv_exit); +module_platform_driver(axdrv); MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver"); MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c index 7a09575ecff..6428f9e7a55 100644 --- a/drivers/net/ethernet/8390/es3210.c +++ b/drivers/net/ethernet/8390/es3210.c @@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) goto out; } - for (i = 0; i < ETHER_ADDR_LEN ; i++) + for (i = 0; i < ETH_ALEN ; i++) dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); /* Check the Racal vendor ID as well. */ diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c index eeac843dcd2..d42938b6b59 100644 --- a/drivers/net/ethernet/8390/hp-plus.c +++ b/drivers/net/ethernet/8390/hp-plus.c @@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) /* Retrieve and checksum the station address. 
*/ outw(MAC_Page, ioaddr + HP_PAGING); - for(i = 0; i < ETHER_ADDR_LEN; i++) { + for(i = 0; i < ETH_ALEN; i++) { unsigned char inval = inb(ioaddr + 8 + i); dev->dev_addr[i] = inval; checksum += inval; diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c index 18564d4a7c0..113f1e075a2 100644 --- a/drivers/net/ethernet/8390/hp.c +++ b/drivers/net/ethernet/8390/hp.c @@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); - for(i = 0; i < ETHER_ADDR_LEN; i++) + for(i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = inb(ioaddr + i); printk(" %pM", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c index 3dac937a67c..5370c884620 100644 --- a/drivers/net/ethernet/8390/hydra.c +++ b/drivers/net/ethernet/8390/hydra.c @@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z) if (!dev) return -ENOMEM; - for(j = 0; j < ETHER_ADDR_LEN; j++) + for (j = 0; j < ETH_ALEN; j++) dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); /* We must set the 8390 for word mode. */ diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c index f9888d20177..69490ae018e 100644 --- a/drivers/net/ethernet/8390/lne390.c +++ b/drivers/net/ethernet/8390/lne390.c @@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1 || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) { printk("lne390.c: card not found"); - for(i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i)); printk(" (invalid prefix).\n"); return -ENODEV; } #endif - for(i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n", 0xa+revision, ioaddr/0x1000, dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c index cd36a6a5f40..9b9c77d5a65 100644 --- a/drivers/net/ethernet/8390/ne-h8300.c +++ b/drivers/net/ethernet/8390/ne-h8300.c @@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; - for(i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 1063093b3af..f92ea2a65a5 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) #ifdef CONFIG_PLAT_MAPPI outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); /* 0x61 */ - for (i = 0 ; i < ETHER_ADDR_LEN ; i++) { + for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[i] = SA_prom[i] = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); } #else - for(i = 0; i < ETHER_ADDR_LEN; i++) { + for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[i] = SA_prom[i]; } #endif diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c index 70cdc699634..922b32036c6 100644 --- a/drivers/net/ethernet/8390/ne2.c +++ b/drivers/net/ethernet/8390/ne2.c @@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) dev->base_addr = base_addr; - for(i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", 
dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 39923425ba2..3fab04a0034 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev, struct ei_device *ei = netdev_priv(dev); struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); } static const struct ethtool_ops ne2k_pci_ethtool_ops = { diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c index 243ed2aee88..2a3e8057fea 100644 --- a/drivers/net/ethernet/8390/ne3210.c +++ b/drivers/net/ethernet/8390/ne3210.c @@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device) #endif port_index = inb(ioaddr + NE3210_CFG2) >> 6; - for(i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", edev->slot, ifmap[port_index], dev->dev_addr); diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c index d85f0a84bc7..3b903759980 100644 --- a/drivers/net/ethernet/8390/stnic.c +++ b/drivers/net/ethernet/8390/stnic.c @@ -114,7 +114,7 @@ static int __init stnic_probe(void) #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); #endif - for (i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = stnic_eadr[i]; /* Set the base address to point to the NIC, not the "real" base! 
*/ diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c index 3aa9fe9999b..bcd27323b20 100644 --- a/drivers/net/ethernet/8390/zorro8390.c +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev, if (i) return i; - for (i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = SA_prom[i]; pr_debug("Found ethernet address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 597f4d45c63..3474a61d470 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/adi/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" +source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index be5dde04026..cd6d69a6a7d 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ATMEL) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ +obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 6d9f6911000..cb4f38a17f2 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops; #ifdef VLAN_SUPPORT -static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct netdev_private *np = netdev_priv(dev); @@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) set_bit(vid, np->active_vlans); set_rx_mode(dev); spin_unlock(&np->lock); + + return 0; } -static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct netdev_private *np = netdev_priv(dev); @@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, np->active_vlans); set_rx_mode(dev); spin_unlock(&np->lock); + + return 0; } #endif /* VLAN_SUPPORT */ @@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(np->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 442fefa4f2c..c885aa905de 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1623,18 +1623,7 @@ static struct 
platform_driver greth_of_driver = { .remove = __devexit_p(greth_of_remove), }; -static int __init greth_init(void) -{ - return platform_driver_register(&greth_of_driver); -} - -static void __exit greth_cleanup(void) -{ - platform_driver_unregister(&greth_of_driver); -} - -module_init(greth_init); -module_exit(greth_cleanup); +module_platform_driver(greth_of_driver); MODULE_AUTHOR("Aeroflex Gaisler AB."); MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver"); diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index a9745f4ddbf..33e0a8c20f6 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev) writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); /* Setting the MAC address to the device */ - for(i = 0; i < ETH_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) writeb( dev->dev_addr[i], mmio + PADR + i ); /* Enable interrupt coalesce */ @@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo { struct amd8111e_priv *lp = netdev_priv(dev); struct pci_dev *pci_dev = lp->pci_dev; - strcpy (info->driver, MODULE_NAME); - strcpy (info->version, MODULE_VERS); - sprintf(info->fw_version,"%u",chip_version); - strcpy (info->bus_info, pci_name(pci_dev)); + strlcpy(info->driver, MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, MODULE_VERS, sizeof(info->version)); + snprintf(info->fw_version, sizeof(info->fw_version), + "%u", chip_version); + strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); } static int amd8111e_get_regs_len(struct net_device *dev) @@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); spin_lock_irq(&lp->lock); /* Setting the MAC address to the device */ - for(i = 0; i < ETH_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) writeb( dev->dev_addr[i], lp->mmio + PADR + i ); spin_unlock_irq(&lp->lock); @@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, } /* Initializing MAC address */ - for(i = 0; i < ETH_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = readb(lp->mmio + PADR + i); /* Setting user defined parametrs */ diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h index 2ff2e7a12dd..8baa3527ba7 100644 --- a/drivers/net/ethernet/amd/amd8111e.h +++ b/drivers/net/ethernet/amd/amd8111e.h @@ -586,7 +586,6 @@ typedef enum { #define PKT_BUFF_SZ 1536 #define MIN_PKT_LEN 60 -#define ETH_ADDR_LEN 6 #define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */ #define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */ @@ -808,8 +807,8 @@ typedef enum { static int card_idx; static int speed_duplex[MAX_UNITS] = { 0, }; -static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1}; -static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0}; +static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true }; +static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... 
MAX_UNITS-1] = false }; static unsigned int chip_version; #endif /* _AMD8111E_H */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 4865ff14beb..cc9262be69c 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1339,18 +1339,7 @@ static struct platform_driver au1000_eth_driver = { .owner = THIS_MODULE, }, }; -MODULE_ALIAS("platform:au1000-eth"); - - -static int __init au1000_init_module(void) -{ - return platform_driver_register(&au1000_eth_driver); -} -static void __exit au1000_exit_module(void) -{ - platform_driver_unregister(&au1000_eth_driver); -} +module_platform_driver(au1000_eth_driver); -module_init(au1000_init_module); -module_exit(au1000_exit_module); +MODULE_ALIAS("platform:au1000-eth"); diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 3accd5d21b0..6be0dd67631 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -160,8 +160,6 @@ Include Files Defines ---------------------------------------------------------------------------- */ -#define ETHER_ADDR_LEN ETH_ALEN - /* 6 bytes in an Ethernet Address */ #define MACE_LADRF_LEN 8 /* 8 bytes in Logical Address Filter */ @@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) } } /* Set PADR register */ - for (i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]); /* MAC Configuration Control Register should be written last */ @@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link) /* Read the ethernet address from the CIS. */ len = pcmcia_get_tuple(link, 0x80, &buf); - if (!buf || len < ETHER_ADDR_LEN) { + if (!buf || len < ETH_ALEN) { kfree(buf); goto failed; } - memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN); + memcpy(dev->dev_addr, buf, ETH_ALEN); kfree(buf); /* Verify configuration by reading the MACE ID. 
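
[Editorial aside, not part of the patch: the coalesce[]/dynamic_ipg[] change in the amd8111e.h hunk just above uses "[0 ... N-1] = value", a GNU C range designated initializer that fills every element without listing them out. A compilable sketch follows; MAX_UNITS is 8 here to match the driver, but the value is only illustrative.]

#include <stdbool.h>
#include <stdio.h>

#define MAX_UNITS 8

/* Range designated initializers (GCC/clang extension): every element of
 * each array gets the same default, as in the module parameter arrays above.
 */
static bool coalesce[MAX_UNITS]    = { [0 ... MAX_UNITS - 1] = true };
static bool dynamic_ipg[MAX_UNITS] = { [0 ... MAX_UNITS - 1] = false };

int main(void)
{
	int i;

	for (i = 0; i < MAX_UNITS; i++)
		printf("unit %d: coalesce=%d dynamic_ipg=%d\n",
		       i, coalesce[i], dynamic_ipg[i]);
	return 0;
}

[End of aside; the patch continues below.]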
*/ @@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), + "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { @@ -1420,7 +1419,7 @@ Output static void set_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); - int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ + int adr[ETH_ALEN] = {0}; /* Ethernet address */ struct netdev_hw_addr *ha; #ifdef PCMCIA_DEBUG @@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev) /* Calculate multicast logical address filter */ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); netdev_for_each_mc_addr(ha, dev) { - memcpy(adr, ha->addr, ETHER_ADDR_LEN); + memcpy(adr, ha->addr, ETH_ALEN); BuildLAF(lp->multicast_ladrf, adr); } } diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index f92bc6e3482..20e6dab0186 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev, { struct pcnet32_private *lp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); if (lp->pci_dev) - strcpy(info->bus_info, pci_name(lp->pci_dev)); + strlcpy(info->bus_info, pci_name(lp->pci_dev), + sizeof(info->bus_info)); else - sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); + snprintf(info->bus_info, sizeof(info->bus_info), + "VLB 0x%lx", dev->base_addr); } static u32 pcnet32_get_link(struct net_device *dev) diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 8fda457f94c..7ea16d32a5f 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = { .remove = __devexit_p(sunlance_sbus_remove), }; - -/* Find all the lance cards on the system and initialize them */ -static int __init sparc_lance_init(void) -{ - return platform_driver_register(&sunlance_sbus_driver); -} - -static void __exit sparc_lance_exit(void) -{ - platform_driver_unregister(&sunlance_sbus_driver); -} - -module_init(sparc_lance_init); -module_exit(sparc_lance_exit); +module_platform_driver(sunlance_sbus_driver); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c index 7be884d0aaf..0a9326aa58b 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c @@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, atl1c_driver_version, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 02c7ed8d9ec..b8591246eb4 100644 --- 
a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev) } } -static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data) +static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data) } } -static void atl1c_vlan_mode(struct net_device *netdev, u32 features) +static void atl1c_vlan_mode(struct net_device *netdev, + netdev_features_t features) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; @@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; } -static u32 atl1c_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t atl1c_fix_features(struct net_device *netdev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features) return features; } -static int atl1c_set_features(struct net_device *netdev, u32 features) +static int atl1c_set_features(struct net_device *netdev, + netdev_features_t features) { - u32 changed = netdev->features ^ features; + netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atl1c_vlan_mode(netdev, features); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c index 6269438d365..6e61f9f9ebb 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c @@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev, { struct atl1e_adapter *adapter = netdev_priv(netdev); - strncpy(drvinfo->driver, atl1e_driver_name, 32); - strncpy(drvinfo->version, atl1e_driver_version, 32); - strncpy(drvinfo->fw_version, "L1e", 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1e_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl1e_get_regs_len(netdev); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 95483bcac1d..c915c087381 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev) } } -static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data) +static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data) } } -static void atl1e_vlan_mode(struct net_device *netdev, u32 features) +static void atl1e_vlan_mode(struct net_device *netdev, + netdev_features_t features) { struct atl1e_adapter *adapter = netdev_priv(netdev); u32 mac_ctrl_data = 0; @@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p) 
return 0; } -static u32 atl1e_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t atl1e_fix_features(struct net_device *netdev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features) return features; } -static int atl1e_set_features(struct net_device *netdev, u32 features) +static int atl1e_set_features(struct net_device *netdev, + netdev_features_t features) { - u32 changed = netdev->features ^ features; + netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atl1e_vlan_mode(netdev, features); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 33a4e35f5ee..9bd20497664 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, ATLX_DRIVER_VERSION, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->eedump_len = ATL1_EEDUMP_LEN; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 1feae5928a4..071f4c85896 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter) synchronize_irq(adapter->pdev->irq); } -static void __atl2_vlan_mode(u32 features, u32 *ctrl) +static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl) } } -static void atl2_vlan_mode(struct net_device *netdev, u32 features) +static void atl2_vlan_mode(struct net_device *netdev, + netdev_features_t features) { struct atl2_adapter *adapter = netdev_priv(netdev); u32 ctrl; @@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter) atl2_vlan_mode(adapter->netdev, adapter->netdev->features); } -static u32 atl2_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t atl2_fix_features(struct net_device *netdev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features) return features; } -static int atl2_set_features(struct net_device *netdev, u32 features) +static int atl2_set_features(struct net_device *netdev, + netdev_features_t features) { - u32 changed = netdev->features ^ features; + netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atl2_vlan_mode(netdev, features); @@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev, { struct atl2_adapter *adapter = netdev_priv(netdev); - strncpy(drvinfo->driver, atl2_driver_name, 32); - strncpy(drvinfo->version, atl2_driver_version, 32); - strncpy(drvinfo->fw_version, "L2", 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl2_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version)); 
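
[Editorial aside, not part of the patch: many hunks in this patch replace strcpy()/sprintf() with strlcpy()/snprintf() when filling ethtool_drvinfo. Those fields are fixed-size arrays, so every copy should be bounded by sizeof() of the destination. The sketch below shows the pattern in userspace; it carries its own strlcpy(), since that helper is a kernel/BSD routine rather than standard C, and the struct layout and strings are invented.]

#include <stdio.h>
#include <string.h>

struct drvinfo {		/* shaped like the ethtool fields used above */
	char driver[32];
	char version[32];
	char bus_info[32];
};

/* Minimal strlcpy(): always NUL-terminates, truncates if needed. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	struct drvinfo info;

	my_strlcpy(info.driver, "example-driver", sizeof(info.driver));
	my_strlcpy(info.version, "1.0", sizeof(info.version));
	/* snprintf() is bounded too, unlike the sprintf() calls being replaced. */
	snprintf(info.bus_info, sizeof(info.bus_info), "PCMCIA 0x%lx", 0x300UL);

	printf("%s %s %s\n", info.driver, info.version, info.bus_info);
	return 0;
}

[End of aside; the patch continues below.]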
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl2_get_regs_len(netdev); diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index aabcf4b5745..8ff7411094d 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work) spin_unlock_irqrestore(&adapter->lock, flags); } -static void __atlx_vlan_mode(u32 features, u32 *ctrl) +static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl) } } -static void atlx_vlan_mode(struct net_device *netdev, u32 features) +static void atlx_vlan_mode(struct net_device *netdev, + netdev_features_t features) { struct atlx_adapter *adapter = netdev_priv(netdev); unsigned long flags; @@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter) atlx_vlan_mode(adapter->netdev, adapter->netdev->features); } -static u32 atlx_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t atlx_fix_features(struct net_device *netdev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features) return features; } -static int atlx_set_features(struct net_device *netdev, u32 features) +static int atlx_set_features(struct net_device *netdev, + netdev_features_t features) { - u32 changed = netdev->features ^ features; + netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atlx_vlan_mode(netdev, features); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 965c7235804..021fb818007 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -57,11 +57,11 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.1.11" -#define DRV_MODULE_RELDATE "July 20, 2011" -#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw" +#define DRV_MODULE_VERSION "2.2.1" +#define DRV_MODULE_RELDATE "Dec 18, 2011" +#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" -#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw" +#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" @@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev) mutex_lock(&bp->cnic_lock); cp->drv_state = 0; bnapi->cnic_present = 0; - rcu_assign_pointer(bp->cnic_ops, NULL); + RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_lock); synchronize_rcu(); return 0; @@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock) if (bp->autoneg & AUTONEG_SPEED) { u32 adv_reg, adv1000_reg; - u32 new_adv_reg = 0; - u32 new_adv1000_reg = 0; + u32 new_adv = 0; + u32 new_adv1000 = 0; bnx2_read_phy(bp, bp->mii_adv, &adv_reg); adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | @@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock) bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); adv1000_reg &= PHY_ALL_1000_SPEED; - if (bp->advertising & ADVERTISED_10baseT_Half) - new_adv_reg |= ADVERTISE_10HALF; - if (bp->advertising & ADVERTISED_10baseT_Full) - 
new_adv_reg |= ADVERTISE_10FULL; - if (bp->advertising & ADVERTISED_100baseT_Half) - new_adv_reg |= ADVERTISE_100HALF; - if (bp->advertising & ADVERTISED_100baseT_Full) - new_adv_reg |= ADVERTISE_100FULL; - if (bp->advertising & ADVERTISED_1000baseT_Full) - new_adv1000_reg |= ADVERTISE_1000FULL; + new_adv = ethtool_adv_to_mii_adv_t(bp->advertising); + new_adv |= ADVERTISE_CSMA; + new_adv |= bnx2_phy_get_pause_adv(bp); - new_adv_reg |= ADVERTISE_CSMA; + new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising); - new_adv_reg |= bnx2_phy_get_pause_adv(bp); - - if ((adv1000_reg != new_adv1000_reg) || - (adv_reg != new_adv_reg) || + if ((adv1000_reg != new_adv1000) || + (adv_reg != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) { - bnx2_write_phy(bp, bp->mii_adv, new_adv_reg); - bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg); + bnx2_write_phy(bp, bp->mii_adv, new_adv); + bnx2_write_phy(bp, MII_CTRL1000, new_adv1000); bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | BMCR_ANENABLE); } @@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) } static inline int -bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) +bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) { - struct sk_buff *skb; + u8 *data; struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; dma_addr_t mapping; struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; - unsigned long align; - skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp); - if (skb == NULL) { + data = kmalloc(bp->rx_buf_size, gfp); + if (!data) return -ENOMEM; - } - if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) - skb_reserve(skb, BNX2_RX_ALIGN - align); - - mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size, + mapping = dma_map_single(&bp->pdev->dev, + get_l2_fhdr(data), + bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { - dev_kfree_skb(skb); + kfree(data); return -EIO; } - rx_buf->skb = skb; - rx_buf->desc = (struct l2_fhdr *) skb->data; + rx_buf->data = data; dma_unmap_addr_set(rx_buf, mapping, mapping); rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; @@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; u16 hw_cons, sw_cons, sw_ring_cons; int tx_pkt = 0, index; + unsigned int tx_bytes = 0; struct netdev_queue *txq; index = (bnapi - bp->bnx2_napi); @@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) sw_cons = NEXT_TX_BD(sw_cons); + tx_bytes += skb->len; dev_kfree_skb(skb); tx_pkt++; if (tx_pkt == budget) @@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) hw_cons = bnx2_get_hw_tx_cons(bnapi); } + netdev_tx_completed_queue(txq, tx_pkt, tx_bytes); txr->hw_tx_cons = hw_cons; txr->tx_cons = sw_cons; @@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, } static inline void -bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, - struct sk_buff *skb, u16 cons, u16 prod) +bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, + u8 *data, u16 cons, u16 prod) { struct sw_bd *cons_rx_buf, *prod_rx_buf; struct rx_bd *cons_bd, *prod_bd; @@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, rxr->rx_prod_bseq += bp->rx_buf_use_size; - prod_rx_buf->skb = skb; - prod_rx_buf->desc = (struct l2_fhdr *) 
skb->data; + prod_rx_buf->data = data; if (cons == prod) return; @@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; } -static int -bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, +static struct sk_buff * +bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx) { int err; u16 prod = ring_idx & 0xffff; + struct sk_buff *skb; - err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC); + err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); if (unlikely(err)) { - bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod); + bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod); +error: if (hdr_len) { unsigned int raw_len = len + 4; int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT; bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); } - return err; + return NULL; } - skb_reserve(skb, BNX2_RX_OFFSET); dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - + skb = build_skb(data); + if (!skb) { + kfree(data); + goto error; + } + skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET); if (hdr_len == 0) { skb_put(skb, len); - return 0; + return skb; } else { unsigned int i, frag_len, frag_size, pages; struct sw_pg *rx_pg; @@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, skb_frag_size_sub(frag, tail); skb->data_len -= tail; } - return 0; + return skb; } rx_pg = &rxr->rx_pg_ring[pg_cons]; @@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, rxr->rx_pg_prod = pg_prod; bnx2_reuse_rx_skb_pages(bp, rxr, skb, pages - i); - return err; + return NULL; } dma_unmap_page(&bp->pdev->dev, mapping_old, @@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, rxr->rx_pg_prod = pg_prod; rxr->rx_pg_cons = pg_cons; } - return 0; + return skb; } static inline u16 @@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct sw_bd *rx_buf, *next_rx_buf; struct sk_buff *skb; dma_addr_t dma_addr; + u8 *data; sw_ring_cons = RX_RING_IDX(sw_cons); sw_ring_prod = RX_RING_IDX(sw_prod); rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; - skb = rx_buf->skb; - prefetchw(skb); - - next_rx_buf = - &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; - prefetch(next_rx_buf->desc); + data = rx_buf->data; + rx_buf->data = NULL; - rx_buf->skb = NULL; + rx_hdr = get_l2_fhdr(data); + prefetch(rx_hdr); dma_addr = dma_unmap_addr(rx_buf, mapping); @@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); - rx_hdr = rx_buf->desc; + next_rx_buf = + &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; + prefetch(get_l2_fhdr(next_rx_buf->data)); + len = rx_hdr->l2_fhdr_pkt_len; status = rx_hdr->l2_fhdr_status; @@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME))) { - bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, + bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); if (pg_ring_used) { int pages; @@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) len -= 4; if (len <= bp->rx_copy_thresh) { - struct sk_buff *new_skb; - - new_skb = netdev_alloc_skb(bp->dev, len + 6); - if (new_skb == 
NULL) { - bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, + skb = netdev_alloc_skb(bp->dev, len + 6); + if (skb == NULL) { + bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); goto next_rx; } /* aligned copy */ - skb_copy_from_linear_data_offset(skb, - BNX2_RX_OFFSET - 6, - new_skb->data, len + 6); - skb_reserve(new_skb, 6); - skb_put(new_skb, len); + memcpy(skb->data, + (u8 *)rx_hdr + BNX2_RX_OFFSET - 6, + len + 6); + skb_reserve(skb, 6); + skb_put(skb, len); - bnx2_reuse_rx_skb(bp, rxr, skb, + bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); - skb = new_skb; - } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len, - dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) - goto next_rx; - + } else { + skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr, + (sw_ring_cons << 16) | sw_ring_prod); + if (!skb) + goto next_rx; + } if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); @@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) ring_prod = prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { - if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) { + if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) { netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", ring_num, i, bp->rx_ring_size); break; @@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + - sizeof(struct skb_shared_info); + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; bp->rx_pg_ring_size = 0; @@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) } bp->rx_buf_use_size = rx_size; - /* hw alignment */ - bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; + /* hw alignment + build_skb() overhead*/ + bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + + NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bp->rx_ring_size = size; bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); @@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) } dev_kfree_skb(skb); } + netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); } } @@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp) for (j = 0; j < bp->rx_max_ring_idx; j++) { struct sw_bd *rx_buf = &rxr->rx_buf_ring[j]; - struct sk_buff *skb = rx_buf->skb; + u8 *data = rx_buf->data; - if (skb == NULL) + if (data == NULL) continue; dma_unmap_single(&bp->pdev->dev, @@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp) bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - rx_buf->skb = NULL; + rx_buf->data = NULL; - dev_kfree_skb(skb); + kfree(data); } for (j = 0; j < bp->rx_max_pg_ring_idx; j++) bnx2_free_rx_page(bp, rxr, j); @@ -5736,7 +5733,8 @@ static int bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) { unsigned int pkt_size, num_pkts, i; - struct sk_buff *skb, *rx_skb; + struct sk_buff *skb; + u8 *data; unsigned char *packet; u16 rx_start_idx, rx_idx; dma_addr_t map; @@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) } rx_buf = &rxr->rx_buf_ring[rx_start_idx]; - rx_skb = rx_buf->skb; + data = rx_buf->data; - rx_hdr = rx_buf->desc; - skb_reserve(rx_skb, BNX2_RX_OFFSET); + rx_hdr = get_l2_fhdr(data); + data = (u8 *)rx_hdr + BNX2_RX_OFFSET; dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, 
mapping), - bp->rx_buf_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); if (rx_hdr->l2_fhdr_status & (L2_FHDR_ERRORS_BAD_CRC | @@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) } for (i = 14; i < pkt_size; i++) { - if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) { + if (*(data + i) != (unsigned char) (i & 0xff)) { goto loopback_test_done; } } @@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; + netdev_tx_sent_queue(txq, skb->len); + prod = NEXT_TX_BD(prod); txr->tx_prod_bseq += skb->len; @@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2 *bp = netdev_priv(dev); - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - strcpy(info->bus_info, pci_name(bp->pdev)); - strcpy(info->fw_version, bp->fw_version); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); } #define BNX2_REGDUMP_LEN (32 * 1024) @@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) return 0; } -static u32 -bnx2_fix_features(struct net_device *dev, u32 features) +static netdev_features_t +bnx2_fix_features(struct net_device *dev, netdev_features_t features) { struct bnx2 *bp = netdev_priv(dev); @@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features) } static int -bnx2_set_features(struct net_device *dev, u32 features) +bnx2_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2 *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 99d31a7d6aa..1db2d51ba3f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -6563,12 +6563,25 @@ struct l2_fhdr { #define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID) #define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID) +/* + * This driver uses new build_skb() API : + * RX ring buffer contains pointer to kmalloc() data only, + * skb are built only after Hardware filled the frame. + */ struct sw_bd { - struct sk_buff *skb; - struct l2_fhdr *desc; + u8 *data; DEFINE_DMA_UNMAP_ADDR(mapping); }; +/* Its faster to compute this from data than storing it in sw_bd + * (less cache misses) + */ +static inline struct l2_fhdr *get_l2_fhdr(u8 *data) +{ + return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD); +} + + struct sw_pg { struct page *page; DEFINE_DMA_UNMAP_ADDR(mapping); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index aec7212ac98..8c73d34b2ff 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -23,8 +23,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.70.30-0" -#define DRV_MODULE_RELDATE "2011/10/25" +#define DRV_MODULE_VERSION "1.70.35-0" +#define DRV_MODULE_RELDATE "2011/11/10" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -293,8 +293,13 @@ enum { #define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) /* fast path */ +/* + * This driver uses new build_skb() API : + * RX ring buffer contains pointer to kmalloc() data only, + * skb are built only after Hardware filled the frame. 
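
[Editorial aside, not part of the patch: the get_l2_fhdr() helper added to bnx2.h above recomputes the frame-header address from the kmalloc()ed data pointer, by aligning up and skipping the reserved headroom, instead of storing a second pointer per RX buffer. A self-contained sketch of that arithmetic follows; the alignment and pad constants are stand-ins for BNX2_RX_ALIGN and NET_SKB_PAD, not their real values.]

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define RX_ALIGN	16UL	/* stand-in for BNX2_RX_ALIGN */
#define SKB_PAD		32UL	/* stand-in for NET_SKB_PAD   */

/* Round a pointer up to the next RX_ALIGN boundary (like PTR_ALIGN). */
static void *ptr_align(void *p, unsigned long a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

/* Equivalent of get_l2_fhdr(): the header lives at the aligned start of the
 * buffer plus the headroom, so it never has to be stored alongside the data.
 */
static void *frame_hdr(uint8_t *data)
{
	return (uint8_t *)ptr_align(data, RX_ALIGN) + SKB_PAD;
}

int main(void)
{
	uint8_t *data = malloc(4096);

	if (!data)
		return 1;
	printf("data=%p hdr=%p\n", (void *)data, frame_hdr(data));
	free(data);
	return 0;
}

[End of aside; the patch continues below.]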
+ */ struct sw_rx_bd { - struct sk_buff *skb; + u8 *data; DEFINE_DMA_UNMAP_ADDR(mapping); }; @@ -411,8 +416,7 @@ union db_prod { /* Number of u64 elements in SGE mask array */ -#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ - BIT_VEC64_ELEM_SZ) +#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) @@ -425,8 +429,8 @@ union host_hc_status_block { struct bnx2x_agg_info { /* - * First aggregation buffer is an skb, the following - are pages. - * We will preallocate the skbs for each aggregation when + * First aggregation buffer is a data buffer, the following - are pages. + * We will preallocate the data buffer for each aggregation when * we open the interface and will replace the BD at the consumer * with this one when we receive the TPA_START CQE in order to * keep the Rx BD ring consistent. @@ -440,6 +444,7 @@ struct bnx2x_agg_info { u16 parsing_flags; u16 vlan_tag; u16 len_on_bd; + u32 rxhash; }; #define Q_STATS_OFFSET32(stat_name) \ @@ -507,6 +512,7 @@ struct bnx2x_fastpath { __le16 fp_hc_idx; u8 index; /* number in fp array */ + u8 rx_queue; /* index for skb_record */ u8 cl_id; /* eth client id */ u8 cl_qzone_id; u8 fw_sb_id; /* status block number in FW */ @@ -881,6 +887,8 @@ struct bnx2x_common { #define CHIP_PORT_MODE_NONE 0x2 #define CHIP_MODE(bp) (bp->common.chip_port_mode) #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) + + u32 boot_mode; }; /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ @@ -1042,6 +1050,8 @@ struct bnx2x_slowpath { u32 wb_comp; u32 wb_data[4]; + + union drv_info_to_mcp drv_info_to_mcp; }; #define bnx2x_sp(bp, var) (&bp->slowpath->var) @@ -1122,18 +1132,21 @@ enum { enum { BNX2X_PORT_QUERY_IDX, BNX2X_PF_QUERY_IDX, + BNX2X_FCOE_QUERY_IDX, BNX2X_FIRST_QUEUE_QUERY_IDX, }; struct bnx2x_fw_stats_req { struct stats_query_header hdr; - struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; + struct stats_query_entry query[FP_SB_MAX_E1x+ + BNX2X_FIRST_QUEUE_QUERY_IDX]; }; struct bnx2x_fw_stats_data { struct stats_counter storm_counters; struct per_port_stats port; struct per_pf_stats pf; + struct fcoe_statistics_params fcoe; struct per_queue_stats queue_stats[1]; }; @@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data { enum { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, + BNX2X_SP_RTNL_FAN_FAILURE, }; @@ -1186,10 +1200,20 @@ struct bnx2x { #define ETH_MAX_JUMBO_PACKET_SIZE 9600 /* Max supported alignment is 256 (8 shift) */ -#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ - L1_CACHE_SHIFT : 8) - /* FW use 2 Cache lines Alignment for start packet and size */ -#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) +#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) + + /* FW uses 2 Cache lines Alignment for start packet and size + * + * We assume skb_build() uses sizeof(struct skb_shared_info) bytes + * at the end of skb->data, to avoid wasting a full cache line. + * This reduces memory use (skb->truesize). 
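
The comments above (for both bnx2 and bnx2x) describe the same conversion: instead of preallocating sk_buffs for the RX ring, the drivers now kmalloc() a raw buffer sized so that build_skb() can later wrap it with no copy and no second allocation. The following is an editor's sketch of that buffer layout, not part of the patch; it assumes the 3.3-era single-argument build_skb() and uses illustrative names (example_*, rx_buf_use_size) that do not appear in the drivers.

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Editor's illustration only. */
static u8 *example_alloc_rx_data(unsigned int rx_buf_use_size, gfp_t gfp)
{
	/* payload plus headroom, with tail room for struct skb_shared_info,
	 * roughly as bp->rx_buf_size is sized in the hunks above */
	unsigned int size = SKB_DATA_ALIGN(rx_buf_use_size) + NET_SKB_PAD +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return kmalloc(size, gfp);		/* no sk_buff is allocated yet */
}

static struct sk_buff *example_build_rx_skb(u8 *data, unsigned int len)
{
	struct sk_buff *skb = build_skb(data);	/* wrap the buffer the NIC filled */

	if (!skb)
		return NULL;			/* caller still owns (and kfree()s) data */
	skb_reserve(skb, NET_SKB_PAD);		/* skip the reserved headroom */
	skb_put(skb, len);
	return skb;
}
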
+ */ +#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) + +#define BNX2X_FW_RX_ALIGN_END \ + max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) struct host_sp_status_block *def_status_blk; @@ -1249,6 +1273,7 @@ struct bnx2x { #define NO_ISCSI_OOO_FLAG (1 << 13) #define NO_ISCSI_FLAG (1 << 14) #define NO_FCOE_FLAG (1 << 15) +#define BC_SUPPORTS_PFC_STATS (1 << 17) #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) @@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) -#define RSS_FLAGS(bp) \ - (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ - (bp->multi_mode << \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) #define MULTI_MASK 0x7f @@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_VPD_LEN 128 #define VENDOR_ID_LEN 4 +int bnx2x_close(struct net_device *dev); + /* Congestion management fairness mode */ #define CMNG_FNS_NONE 0 #define CMNG_FNS_MINMAX 1 @@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = { void bnx2x_set_ethtool_ops(struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); + + +#define BNX2X_MF_PROTOCOL(bp) \ + ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) + +#ifdef BCM_CNIC +#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) + +#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) +#endif + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 580b44edb06..2b731b25359 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) * @to: destination FP index * * Makes sure the contents of the bp->fp[to].napi is kept - * intact. + * intact. 
This is done by first copying the napi struct from + * the target to the source, and then mem copying the entire + * source onto the target */ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) { struct bnx2x_fastpath *from_fp = &bp->fp[from]; struct bnx2x_fastpath *to_fp = &bp->fp[to]; - struct napi_struct orig_napi = to_fp->napi; + + /* Copy the NAPI object as it has been already initialized */ + from_fp->napi = to_fp->napi; + /* Move bnx2x_fastpath contents */ memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; - - /* Restore the NAPI object as it has been already initialized */ - to_fp->napi = orig_napi; } int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ @@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ * return idx of last bd freed */ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, - u16 idx) + u16 idx, unsigned int *pkts_compl, + unsigned int *bytes_compl) { struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; struct eth_tx_start_bd *tx_start_bd; @@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, /* release skb */ WARN_ON(!skb); + if (skb) { + (*pkts_compl)++; + (*bytes_compl) += skb->len; + } dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) { struct netdev_queue *txq; u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; + unsigned int pkts_compl = 0, bytes_compl = 0; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) " pkt_cons %u\n", txdata->txq_index, hw_cons, sw_cons, pkt_cons); - bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); + bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, + &pkts_compl, &bytes_compl); + sw_cons++; } + netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); + txdata->tx_pkt_cons = sw_cons; txdata->tx_bd_cons = bd_cons; @@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, fp->last_max_sge, fp->rx_sge_prod); } +/* Set Toeplitz hash value in the skb using the value from the + * CQE (calculated by HW). 
+ */ +static u32 bnx2x_get_rxhash(const struct bnx2x *bp, + const struct eth_fast_path_rx_cqe *cqe) +{ + /* Set Toeplitz hash from CQE */ + if ((bp->dev->features & NETIF_F_RXHASH) && + (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) + return le32_to_cpu(cqe->rss_hash_result); + return 0; +} + static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, - struct sk_buff *skb, u16 cons, u16 prod, + u16 cons, u16 prod, struct eth_fast_path_rx_cqe *cqe) { struct bnx2x *bp = fp->bp; @@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, if (tpa_info->tpa_state != BNX2X_TPA_STOP) BNX2X_ERR("start of bin not in stop [%d]\n", queue); - /* Try to map an empty skb from the aggregation info */ + /* Try to map an empty data buffer from the aggregation info */ mapping = dma_map_single(&bp->pdev->dev, - first_buf->skb->data, + first_buf->data + NET_SKB_PAD, fp->rx_buf_size, DMA_FROM_DEVICE); /* * ...if it fails - move the skb from the consumer to the producer @@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { /* Move the BD from the consumer to the producer */ - bnx2x_reuse_rx_skb(fp, cons, prod); + bnx2x_reuse_rx_data(fp, cons, prod); tpa_info->tpa_state = BNX2X_TPA_ERROR; return; } - /* move empty skb from pool to prod */ - prod_rx_buf->skb = first_buf->skb; + /* move empty data from pool to prod */ + prod_rx_buf->data = first_buf->data; dma_unmap_addr_set(prod_rx_buf, mapping, mapping); - /* point prod_bd to new skb */ + /* point prod_bd to new data */ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); @@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, tpa_info->tpa_state = BNX2X_TPA_START; tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); tpa_info->placement_offset = cqe->placement_offset; + tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe); #ifdef BNX2X_STOP_ON_ERROR fp->tpa_queue_used |= (1 << queue); @@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; struct sw_rx_bd *rx_buf = &tpa_info->first_buf; - u8 pad = tpa_info->placement_offset; + u32 pad = tpa_info->placement_offset; u16 len = tpa_info->len_on_bd; - struct sk_buff *skb = rx_buf->skb; + struct sk_buff *skb = NULL; + u8 *data = rx_buf->data; /* alloc new skb */ - struct sk_buff *new_skb; + u8 *new_data; u8 old_tpa_state = tpa_info->tpa_state; tpa_info->tpa_state = BNX2X_TPA_STOP; @@ -484,18 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (old_tpa_state == BNX2X_TPA_ERROR) goto drop; - /* Try to allocate the new skb */ - new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + /* Try to allocate the new data */ + new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); /* Unmap skb in the pool anyway, as we are going to change pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. 
*/ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); + if (likely(new_data)) + skb = build_skb(data); - if (likely(new_skb)) { - prefetch(skb); - prefetch(((char *)(skb)) + L1_CACHE_BYTES); + if (likely(skb)) { #ifdef BNX2X_STOP_ON_ERROR if (pad + len > fp->rx_buf_size) { @@ -507,8 +534,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, } #endif - skb_reserve(skb, pad); + skb_reserve(skb, pad + NET_SKB_PAD); skb_put(skb, len); + skb->rxhash = tpa_info->rxhash; skb->protocol = eth_type_trans(skb, bp->dev); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -524,8 +552,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, } - /* put new skb in bin */ - rx_buf->skb = new_skb; + /* put new data in bin */ + rx_buf->data = new_data; return; } @@ -537,19 +565,6 @@ drop: fp->eth_q_stats.rx_skb_alloc_failed++; } -/* Set Toeplitz hash value in the skb using the value from the - * CQE (calculated by HW). - */ -static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, - struct sk_buff *skb) -{ - /* Set Toeplitz hash from CQE */ - if ((bp->dev->features & NETIF_F_RXHASH) && - (cqe->fast_path_cqe.status_flags & - ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) - skb->rxhash = - le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); -} int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { @@ -592,6 +607,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) u8 cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; u16 len, pad; + u8 *data; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -602,13 +618,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); - /* Prefetch the page containing the BD descriptor - at producer's index. 
It will be needed when new skb is - allocated */ - prefetch((void *)(PAGE_ALIGN((unsigned long) - (&fp->rx_desc_ring[bd_prod])) - - PAGE_SIZE + 1)); - cqe = &fp->rx_comp_ring[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; @@ -624,138 +633,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { bnx2x_sp_event(fp, cqe); goto next_cqe; + } + rx_buf = &fp->rx_buf_ring[bd_cons]; + data = rx_buf->data; - /* this is an rx packet */ - } else { - rx_buf = &fp->rx_buf_ring[bd_cons]; - skb = rx_buf->skb; - prefetch(skb); - - if (!CQE_TYPE_FAST(cqe_fp_type)) { + if (!CQE_TYPE_FAST(cqe_fp_type)) { #ifdef BNX2X_STOP_ON_ERROR - /* sanity check */ - if (fp->disable_tpa && - (CQE_TYPE_START(cqe_fp_type) || - CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while " - "disable_tpa type %x\n", - CQE_TYPE(cqe_fp_type)); + /* sanity check */ + if (fp->disable_tpa && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", + CQE_TYPE(cqe_fp_type)); #endif - if (CQE_TYPE_START(cqe_fp_type)) { - u16 queue = cqe_fp->queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_start on queue %d\n", - queue); + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); - bnx2x_tpa_start(fp, queue, skb, - bd_cons, bd_prod, - cqe_fp); - - /* Set Toeplitz hash for LRO skb */ - bnx2x_set_skb_rxhash(bp, cqe, skb); - - goto next_rx; - - } else { - u16 queue = - cqe->end_agg_cqe.queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_stop on queue %d\n", - queue); - - bnx2x_tpa_stop(bp, fp, queue, - &cqe->end_agg_cqe, - comp_ring_cons); + bnx2x_tpa_start(fp, queue, + bd_cons, bd_prod, + cqe_fp); + goto next_rx; + } else { + u16 queue = + cqe->end_agg_cqe.queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + bnx2x_tpa_stop(bp, fp, queue, + &cqe->end_agg_cqe, + comp_ring_cons); #ifdef BNX2X_STOP_ON_ERROR - if (bp->panic) - return 0; + if (bp->panic) + return 0; #endif - bnx2x_update_sge_prod(fp, cqe_fp); - goto next_cqe; - } + bnx2x_update_sge_prod(fp, cqe_fp); + goto next_cqe; } - /* non TPA */ - len = le16_to_cpu(cqe_fp->pkt_len); - pad = cqe_fp->placement_offset; - dma_sync_single_for_cpu(&bp->pdev->dev, + } + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - DMA_FROM_DEVICE); - prefetch(((char *)(skb)) + L1_CACHE_BYTES); + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + pad += NET_SKB_PAD; + prefetch(data + pad); /* speedup eth_type_trans() */ + /* is this an error packet? */ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + DP(NETIF_MSG_RX_ERR, + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + fp->eth_q_stats.rx_err_discard_pkt++; + goto reuse_rx; + } - /* is this an error packet? 
*/ - if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + /* Since we don't have a jumbo ring + * copy small packets if mtu > 1500 + */ + if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && + (len <= RX_COPY_THRESH)) { + skb = netdev_alloc_skb_ip_align(bp->dev, len); + if (skb == NULL) { DP(NETIF_MSG_RX_ERR, - "ERROR flags %x rx packet %u\n", - cqe_fp_flags, sw_comp_cons); - fp->eth_q_stats.rx_err_discard_pkt++; + "ERROR packet dropped because of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; goto reuse_rx; } - - /* Since we don't have a jumbo ring - * copy small packets if mtu > 1500 - */ - if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && - (len <= RX_COPY_THRESH)) { - struct sk_buff *new_skb; - - new_skb = netdev_alloc_skb(bp->dev, len + pad); - if (new_skb == NULL) { - DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped " - "because of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; - goto reuse_rx; - } - - /* aligned copy */ - skb_copy_from_linear_data_offset(skb, pad, - new_skb->data + pad, len); - skb_reserve(new_skb, pad); - skb_put(new_skb, len); - - bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); - - skb = new_skb; - - } else - if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { + memcpy(skb->data, data + pad, len); + bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); + } else { + if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) { dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), + dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); + skb = build_skb(data); + if (unlikely(!skb)) { + kfree(data); + fp->eth_q_stats.rx_skb_alloc_failed++; + goto next_rx; + } skb_reserve(skb, pad); - skb_put(skb, len); - } else { DP(NETIF_MSG_RX_ERR, "ERROR packet dropped because " "of alloc failure\n"); fp->eth_q_stats.rx_skb_alloc_failed++; reuse_rx: - bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); + bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); goto next_rx; } + } - skb->protocol = eth_type_trans(skb, bp->dev); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, bp->dev); - /* Set Toeplitz hash for a none-LRO skb */ - bnx2x_set_skb_rxhash(bp, cqe, skb); + /* Set Toeplitz hash for a none-LRO skb */ + skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp); - skb_checksum_none_assert(skb); + skb_checksum_none_assert(skb); - if (bp->dev->features & NETIF_F_RXCSUM) { + if (bp->dev->features & NETIF_F_RXCSUM) { - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - fp->eth_q_stats.hw_csum_err++; - } + if (likely(BNX2X_RX_CSUM_OK(cqe))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + fp->eth_q_stats.hw_csum_err++; } - skb_record_rx_queue(skb, fp->index); + skb_record_rx_queue(skb, fp->rx_queue); if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) @@ -765,7 +759,7 @@ reuse_rx: next_rx: - rx_buf->skb = NULL; + rx_buf->data = NULL; bd_cons = NEXT_RX_IDX(bd_cons); bd_prod = NEXT_RX_IDX(bd_prod); @@ -1011,9 +1005,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) struct sw_rx_bd *first_buf = &tpa_info->first_buf; - first_buf->skb = netdev_alloc_skb(bp->dev, - fp->rx_buf_size); - if (!first_buf->skb) { + first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, + GFP_ATOMIC); + if (!first_buf->data) { BNX2X_ERR("Failed to allocate TPA " "skb pool for queue[%d] - " "disabling TPA on this " @@ -1093,16 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + unsigned pkts_compl = 0, bytes_compl = 0; - u16 bd_cons = 
txdata->tx_bd_cons; u16 sw_prod = txdata->tx_pkt_prod; u16 sw_cons = txdata->tx_pkt_cons; while (sw_cons != sw_prod) { - bd_cons = bnx2x_free_tx_pkt(bp, txdata, - TX_BD(sw_cons)); + bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), + &pkts_compl, &bytes_compl); sw_cons++; } + netdev_tx_reset_queue( + netdev_get_tx_queue(bp->dev, txdata->txq_index)); } } } @@ -1118,16 +1114,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) for (i = 0; i < NUM_RX_BD; i++) { struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; - struct sk_buff *skb = rx_buf->skb; + u8 *data = rx_buf->data; - if (skb == NULL) + if (data == NULL) continue; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - rx_buf->skb = NULL; - dev_kfree_skb(skb); + rx_buf->data = NULL; + kfree(data); } } @@ -1445,6 +1441,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp) break; } +#ifdef BCM_CNIC + /* override in ISCSI SD mod */ + if (IS_MF_ISCSI_SD(bp)) + bp->num_queues = 1; +#endif /* Add special queues */ bp->num_queues += NON_ETH_CONTEXT_USE; } @@ -1509,6 +1510,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) for_each_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + u32 mtu; /* Always use a mini-jumbo MTU for the FCoE L2 ring */ if (IS_FCOE_IDX(i)) @@ -1518,13 +1520,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer * overrun attack. */ - fp->rx_buf_size = - BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + - BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + mtu = BNX2X_FCOE_MINI_JUMBO_MTU; else - fp->rx_buf_size = - bp->dev->mtu + ETH_OVREHEAD + - BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + mtu = bp->dev->mtu; + fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + + IP_HEADER_ALIGNMENT_PADDING + + ETH_OVREHEAD + + mtu + + BNX2X_FW_RX_ALIGN_END; + /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ } } @@ -1541,7 +1545,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp) if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { for (i = 0; i < sizeof(ind_table); i++) ind_table[i] = - bp->fp->cl_id + (i % num_eth_queues); + bp->fp->cl_id + + ethtool_rxfh_indir_default(i, num_eth_queues); } /* @@ -1929,13 +1934,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) break; } - if (!bp->port.pmf) + if (bp->port.pmf) + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); + else bnx2x__link_status_update(bp); /* start the timer */ mod_timer(&bp->timer, jiffies + bp->current_interval); #ifdef BCM_CNIC + /* re-read iscsi info */ + bnx2x_get_iscsi_info(bp); bnx2x_setup_cnic_irq_info(bp); if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); @@ -2799,6 +2808,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + unsigned int pkts_compl = 0, bytes_compl = 0; DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " "dropping packet...\n"); @@ -2810,7 +2820,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) */ first_bd->nbd = cpu_to_le16(nbd); bnx2x_free_tx_pkt(bp, txdata, - TX_BD(txdata->tx_pkt_prod)); + TX_BD(txdata->tx_pkt_prod), + &pkts_compl, &bytes_compl); return NETDEV_TX_OK; } @@ -2871,6 +2882,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) pbd_e2->parsing_data); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + netdev_tx_sent_queue(txq, 
skb->len); + txdata->tx_pkt_prod++; /* * Make sure that the BD data is updated before updating the producer @@ -2981,9 +2994,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) struct bnx2x *bp = netdev_priv(dev); int rc = 0; - if (!is_valid_ether_addr((u8 *)(addr->sa_data))) + if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) return -EINVAL; +#ifdef BCM_CNIC + if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data)) + return -EINVAL; +#endif + if (netif_running(dev)) { rc = bnx2x_set_eth_mac(bp, false); if (rc) @@ -3098,7 +3116,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) u8 cos; int rx_ring_size = 0; - /* if rx_ring_size specified - use it */ +#ifdef BCM_CNIC + if (IS_MF_ISCSI_SD(bp)) { + rx_ring_size = MIN_RX_SIZE_NONTPA; + bp->rx_ring_size = rx_ring_size; + } else +#endif if (!bp->rx_ring_size) { rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); @@ -3108,7 +3131,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) MIN_RX_SIZE_TPA, rx_ring_size); bp->rx_ring_size = rx_ring_size; - } else + } else /* if rx_ring_size specified - use it */ rx_ring_size = bp->rx_ring_size; /* Common */ @@ -3278,14 +3301,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) msix_table_size = bp->igu_sb_cnt + 1; /* fp array: RSS plus CNIC related L2 queues */ - fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * + fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, sizeof(*fp), GFP_KERNEL); if (!fp) goto alloc_err; bp->fp = fp; /* msix table */ - tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); + tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); if (!tbl) goto alloc_err; bp->msix_table = tbl; @@ -3409,7 +3432,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) return bnx2x_reload_if_running(dev); } -u32 bnx2x_fix_features(struct net_device *dev, u32 features) +netdev_features_t bnx2x_fix_features(struct net_device *dev, + netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); @@ -3420,7 +3444,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features) return features; } -int bnx2x_set_features(struct net_device *dev, u32 features) +int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); u32 flags = bp->flags; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 283d663da18..bf27c54ff2e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -20,6 +20,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include "bnx2x.h" @@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu); */ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); #endif -u32 bnx2x_fix_features(struct net_device *dev, u32 features); -int bnx2x_set_features(struct net_device *dev, u32 features); +netdev_features_t bnx2x_fix_features(struct net_device *dev, + netdev_features_t features); +int bnx2x_set_features(struct net_device *dev, netdev_features_t features); /** * bnx2x_tx_timeout - tx timeout netdev callback @@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) { /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ - memset(fp->sge_mask, 0xff, - (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); + 
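
The netdev_tx_*_queue() calls added in the surrounding hunks wire the transmit path into byte queue limits: netdev_tx_sent_queue() when a packet is handed to the hardware, netdev_tx_completed_queue() when completions are reaped, and netdev_tx_reset_queue() when a ring is flushed without completions. A bare sketch of that pairing follows; it is an editor's illustration with made-up function names, not code from the patch.

#include <linux/netdevice.h>

/* Editor's illustration only. */
static void example_tx_enqueue(struct netdev_queue *txq, struct sk_buff *skb)
{
	/* after the descriptor has been queued for the NIC */
	netdev_tx_sent_queue(txq, skb->len);
}

static void example_tx_complete(struct netdev_queue *txq,
				unsigned int pkts_compl,
				unsigned int bytes_compl)
{
	/* once per completion batch, e.g. at the end of the TX poll loop */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
}

static void example_tx_flush(struct netdev_queue *txq)
{
	/* when descriptors are freed without completing (close/reset) */
	netdev_tx_reset_queue(txq);
}
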
memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); /* Clear the two last indices in the page to 1: these are the indices that correspond to the "next" element, @@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, return 0; } -static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) +static inline int bnx2x_alloc_rx_data(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) { - struct sk_buff *skb; + u8 *data; struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; dma_addr_t mapping; - skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); - if (unlikely(skb == NULL)) + data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); + if (unlikely(data == NULL)) return -ENOMEM; - mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, + mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, + fp->rx_buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - dev_kfree_skb_any(skb); + kfree(data); return -ENOMEM; } - rx_buf->skb = skb; + rx_buf->data = data; dma_unmap_addr_set(rx_buf, mapping, mapping); rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); @@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, return 0; } -/* note that we are not allocating a new skb, +/* note that we are not allocating a new buffer, * we are just moving one from cons to prod * we are not creating a new mapping, * so there is no need to check for dma_mapping_error(). */ -static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, +static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, u16 cons, u16 prod) { struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; @@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, dma_unmap_addr_set(prod_rx_buf, mapping, dma_unmap_addr(cons_rx_buf, mapping)); - prod_rx_buf->skb = cons_rx_buf->skb; + prod_rx_buf->data = cons_rx_buf->data; *prod_bd = *cons_bd; } @@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, for (i = 0; i < last; i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; struct sw_rx_bd *first_buf = &tpa_info->first_buf; - struct sk_buff *skb = first_buf->skb; + u8 *data = first_buf->data; - if (skb == NULL) { + if (data == NULL) { DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); continue; } @@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(first_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - dev_kfree_skb(skb); - first_buf->skb = NULL; + kfree(data); + first_buf->data = NULL; } } @@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, * fp->eth_q_stats.rx_skb_alloc_failed = 0 */ for (i = 0; i < rx_ring_size; i++) { - if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { + if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) { fp->eth_q_stats.rx_skb_alloc_failed++; continue; } @@ -1318,6 +1320,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); unsigned long q_type = 0; + bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, BNX2X_FCOE_ETH_CL_ID_IDX); /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than @@ -1488,4 +1491,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) return max_cfg; } +/** + * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 
+ * + * @bp: driver handle + * + */ +void bnx2x_get_iscsi_info(struct bnx2x *bp); + +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ + return 2 * vn + BP_PORT(bp); +} + +/** + * bnx2x_link_sync_notify - send notification to other functions. + * + * @bp: driver handle + * + */ +static inline void bnx2x_link_sync_notify(struct bnx2x *bp) +{ + int func; + int vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + if (vn == BP_VN(bp)) + continue; + + func = func_by_vn(bp, vn); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + } +} + +/** + * bnx2x_update_drv_flags - update flags in shmem + * + * @bp: driver handle + * @flags: flags to update + * @set: set or clear + * + */ +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) +{ + if (SHMEM2_HAS(bp, drv_flags)) { + u32 drv_flags; + bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); + drv_flags = SHMEM2_RD(bp, drv_flags); + + if (set) + SET_FLAGS(drv_flags, flags); + else + RESET_FLAGS(drv_flags, flags); + + SHMEM2_WR(bp, drv_flags, drv_flags); + DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); + } +} + +static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) +{ + if (is_valid_ether_addr(addr)) + return true; +#ifdef BCM_CNIC + if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp)) + return true; +#endif + return false; +} + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 51bd7485ab1..5051cf3deb2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) } #endif -static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) -{ - if (SHMEM2_HAS(bp, drv_flags)) { - u32 drv_flags; - bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); - drv_flags = SHMEM2_RD(bp, drv_flags); - - if (set) - SET_FLAGS(drv_flags, flags); - else - RESET_FLAGS(drv_flags, flags); - - SHMEM2_WR(bp, drv_flags, drv_flags); - DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); - bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); - } -} - static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) { u8 prio, cos; @@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* mark DCBX result for PMF migration */ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); #ifdef BCM_DCBNL - /** + /* * Add new app tlvs to dcbnl */ bnx2x_dcbnl_update_applist(bp, false); #endif - bnx2x_dcbx_stop_hw_tx(bp); - - /* reconfigure the netdevice with the results of the new + /* + * reconfigure the netdevice with the results of the new * dcbx negotiation. 
*/ bnx2x_dcbx_update_tc_mapping(bp); + /* + * allow other funtions to update their netdevices + * accordingly + */ + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + + bnx2x_dcbx_stop_hw_tx(bp); + return; } case BNX2X_DCBX_STATE_TX_PAUSED: @@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_update_ets_params(bp); bnx2x_dcbx_resume_hw_tx(bp); + return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); @@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, /*For IEEE admin_recommendation_bw_precentage *For IEEE admin_recommendation_ets_pg */ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; - for (i = 0; i < 4; i++) { + for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { if (dp->admin_priority_app_table[i].valid) { struct bnx2x_admin_priority_app_table *table = dp->admin_priority_app_table; @@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) { - if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) { + if (!CHIP_IS_E1x(bp)) { bp->dcb_state = dcb_on; bp->dcbx_enabled = dcbx_enabled; } else { @@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { /* if we need to syncronize DCBX result from prev PMF - * read it from shmem and update bp accordingly + * read it from shmem and update bp and netdev accordingly */ if (SHMEM2_HAS(bp, drv_flags) && GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { @@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp) bp->dcbx_error); bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bp->dcbx_error); +#ifdef BCM_DCBNL + /* + * Add new app tlvs to dcbnl + */ + bnx2x_dcbnl_update_applist(bp, false); + /* + * Send a notification for the new negotiated parameters + */ + dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +#endif + /* + * reconfigure the netdevice with the results of the new + * dcbx negotiation. + */ + bnx2x_dcbx_update_tc_mapping(bp); + } } @@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) int i, ff; /* iterate over the app entries looking for idtype and idval */ - for (i = 0, ff = -1; i < 4; i++) { + for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { struct bnx2x_admin_priority_app_table *app_ent = &bp->dcbx_config_params.admin_priority_app_table[i]; if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) @@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) if (ff < 0 && !app_ent->valid) ff = i; } - if (i < 4) + if (i < DCBX_CONFIG_MAX_APP_PROTOCOL) /* if found overwrite up */ bp->dcbx_config_params. 
admin_priority_app_table[i].priority = up; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 2c6a3bca6f2..2ab9254e2d5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table { u32 app_id; }; +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 struct bnx2x_config_dcbx_params { u32 overwrite_settings; u32 admin_dcbx_version; @@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params { u32 admin_recommendation_bw_precentage[8]; u32 admin_recommendation_ets_pg[8]; u32 admin_pfc_bitmap; - struct bnx2x_admin_priority_app_table admin_priority_app_table[4]; + struct bnx2x_admin_priority_app_table + admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL]; u32 admin_default_priority; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index f0ca8b27a55..a688b9d975a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -107,6 +107,10 @@ static const struct { 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, + { STATS_OFFSET32(pfc_frames_received_hi), + 8, STATS_FLAGS_PORT, "pfc_frames_received" }, + { STATS_OFFSET32(pfc_frames_sent_hi), + 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), @@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) DP(NETIF_MSG_LINK, "Unsupported port type\n"); return -EINVAL; } - /* Save new config in case command complete successuly */ + /* Save new config in case command complete successully */ new_multi_phy_config = bp->link_params.multi_phy_config; /* Get the new cfg_idx */ cfg_idx = bnx2x_get_link_cfg_idx(bp); @@ -761,8 +765,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); u8 phy_fw_ver[PHY_FW_VER_LEN]; - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); phy_fw_ver[0] = '\0'; if (bp->port.pmf) { @@ -773,14 +777,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev, bnx2x_release_phy_lock(bp); } - strncpy(info->fw_version, bp->fw_ver, 32); + strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version)); snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), "bc %d.%d.%d%s%s", (bp->common.bc_ver & 0xff0000) >> 16, (bp->common.bc_ver & 0xff00) >> 8, (bp->common.bc_ver & 0xff), ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); - strcpy(info->bus_info, pci_name(bp->pdev)); + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNX2X_NUM_STATS; info->testinfo_len = BNX2X_NUM_TESTS; info->eedump_len = bp->common.flash_size; @@ -1740,6 +1744,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) struct sw_rx_bd *rx_buf; u16 len; int rc = -ENODEV; + u8 *data; + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); /* check the loopback mode */ switch (loopback_mode) { @@ -1748,8 +1754,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) return -EINVAL; break; case BNX2X_MAC_LOOPBACK: - bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? 
- LOOPBACK_XMAC : LOOPBACK_BMAC; + if (CHIP_IS_E3(bp)) { + int cfg_idx = bnx2x_get_link_cfg_idx(bp); + if (bp->port.supported[cfg_idx] & + (SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full)) + bp->link_params.loopback_mode = LOOPBACK_XMAC; + else + bp->link_params.loopback_mode = LOOPBACK_UMAC; + } else + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; default: @@ -1784,6 +1800,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb); rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + netdev_tx_sent_queue(txq, skb->len); + pkt_prod = txdata->tx_pkt_prod++; tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; tx_buf->first_bd = txdata->tx_bd_prod; @@ -1865,10 +1883,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp_rx->rx_buf_size, DMA_FROM_DEVICE); - skb = rx_buf->skb; - skb_reserve(skb, cqe->fast_path_cqe.placement_offset); + data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset; for (i = ETH_HLEN; i < pkt_size; i++) - if (*(skb->data + i) != (unsigned char) (i & 0xff)) + if (*(data + i) != (unsigned char) (i & 0xff)) goto test_loopback_rx_exit; rc = 0; @@ -2285,18 +2302,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, } } -static int bnx2x_get_rxfh_indir(struct net_device *dev, - struct ethtool_rxfh_indir *indir) +static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return (bp->multi_mode == ETH_RSS_MODE_DISABLED ? + 0 : T_ETH_INDIRECTION_TABLE_SIZE); +} + +static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) { struct bnx2x *bp = netdev_priv(dev); - size_t copy_size = - min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; size_t i; - if (bp->multi_mode == ETH_RSS_MODE_DISABLED) - return -EOPNOTSUPP; - /* Get the current configuration of the RSS indirection table */ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); @@ -2309,33 +2328,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, * align the returned table to the Client ID of the leading RSS * queue. 
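
The surrounding ethtool hunks move the driver to the reworked RX flow hash indirection interface: a get_rxfh_indir_size() hook reports the table size (0 meaning "no table"), and get/set_rxfh_indir() now exchange a plain u32 array whose size and ring indices the ethtool core validates. The sketch below is an editor's illustration with invented names and a fixed four-ring spread, not taken from the patch.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_INDIR_SIZE 128			/* illustrative table size */

/* Editor's illustration only. */
static u32 example_get_rxfh_indir_size(struct net_device *dev)
{
	return EXAMPLE_INDIR_SIZE;		/* 0 would disable the table */
}

static int example_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	int i;

	for (i = 0; i < EXAMPLE_INDIR_SIZE; i++)
		indir[i] = i % 4;		/* spread over four RX rings */
	return 0;
}

static int example_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	/* the ethtool core has already checked the size and ring indices */
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_rxfh_indir_size	= example_get_rxfh_indir_size,
	.get_rxfh_indir		= example_get_rxfh_indir,
	.set_rxfh_indir		= example_set_rxfh_indir,
};
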
*/ - for (i = 0; i < copy_size; i++) - indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; - - indir->size = T_ETH_INDIRECTION_TABLE_SIZE; + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) + indir[i] = ind_table[i] - bp->fp->cl_id; return 0; } -static int bnx2x_set_rxfh_indir(struct net_device *dev, - const struct ethtool_rxfh_indir *indir) +static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) { struct bnx2x *bp = netdev_priv(dev); size_t i; u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; - u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - - if (bp->multi_mode == ETH_RSS_MODE_DISABLED) - return -EOPNOTSUPP; - - /* validate the size */ - if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) - return -EINVAL; for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { - /* validate the indices */ - if (indir->ring_index[i] >= num_eth_queues) - return -EINVAL; /* * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() * as an internal storage of an indirection table is a u8 array @@ -2345,7 +2350,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, * align the received table to the Client ID of the leading RSS * queue */ - ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; + ind_table[i] = indir[i] + bp->fp->cl_id; } return bnx2x_config_rss_pf(bp, ind_table, false); @@ -2378,6 +2383,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, + .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh_indir = bnx2x_get_rxfh_indir, .set_rxfh_indir = bnx2x_set_rxfh_indir, }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index fc754cb6cc0..3e30c8642c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1247,11 +1247,14 @@ struct drv_func_mb { #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 + #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 + #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 @@ -1304,6 +1307,8 @@ struct drv_func_mb { #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 + #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 @@ -1360,6 +1365,7 @@ struct drv_func_mb { #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + #define DRV_STATUS_DRV_INFO_REQ 0x04000000 u32 virt_mac_upper; #define VIRT_MAC_SIGN_MASK 0xffff0000 @@ -1964,9 +1970,38 @@ struct shmem2_region { u32 extended_dev_info_shared_addr; u32 ncsi_oem_data_addr; - u32 ocsd_host_addr; - u32 ocbb_host_addr; - u32 ocsd_req_update_interval; + u32 ocsd_host_addr; /* initialized by option ROM */ + u32 ocbb_host_addr; /* initialized by option ROM */ + u32 ocsd_req_update_interval; /* initialized by option ROM */ + u32 temperature_in_half_celsius; + u32 glob_struct_in_host; + + u32 
dcbx_neg_res_ext_offset; +#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000 + + u32 drv_capabilities_flag[E2_FUNC_MAX]; +#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001 +#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 +#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 +#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 + + u32 extended_dev_info_shared_cfg_size; + + u32 dcbx_en[PORT_MAX]; + + /* The offset points to the multi threaded meta structure */ + u32 multi_thread_data_offset; + + /* address of DMAable host address holding values from the drivers */ + u32 drv_info_host_addr_lo; + u32 drv_info_host_addr_hi; + + /* general values written by the MFW (such as current version) */ + u32 drv_info_control; +#define DRV_INFO_CONTROL_VER_MASK 0x000000ff +#define DRV_INFO_CONTROL_VER_SHIFT 0 +#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 +#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 }; @@ -2501,14 +2536,18 @@ struct mac_stx { #define MAC_STX_IDX_MAX 2 struct host_port_stats { - u32 host_port_stats_start; + u32 host_port_stats_counter; struct mac_stx mac_stx[MAC_STX_IDX_MAX]; u32 brb_drop_hi; u32 brb_drop_lo; - u32 host_port_stats_end; + u32 not_used; /* obsolete */ + u32 pfc_frames_tx_hi; + u32 pfc_frames_tx_lo; + u32 pfc_frames_rx_hi; + u32 pfc_frames_rx_lo; }; @@ -2548,6 +2587,118 @@ struct host_func_stats { /* VIC definitions */ #define VICSTATST_UIF_INDEX 2 +/* current drv_info version */ +#define DRV_INFO_CUR_VER 1 + +/* drv_info op codes supported */ +enum drv_info_opcode { + ETH_STATS_OPCODE, + FCOE_STATS_OPCODE, + ISCSI_STATS_OPCODE +}; + +#define ETH_STAT_INFO_VERSION_LEN 12 +/* Per PCI Function Ethernet Statistics required from the driver */ +struct eth_stats_info { + /* Function's Driver Version. padded to 12 */ + u8 version[ETH_STAT_INFO_VERSION_LEN]; + /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ + u8 mac_local[8]; + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + u32 mtu_size; /* MTU Size. Note : Negotiated MTU */ + u32 feature_flags; /* Feature_Flags. */ +#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 +#define FEATURE_ETH_LSO_MASK 0x02 +#define FEATURE_ETH_BOOTMODE_MASK 0x1C +#define FEATURE_ETH_BOOTMODE_SHIFT 2 +#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) +#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) +#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) +#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) +#define FEATURE_ETH_TOE_MASK 0x20 + u32 lso_max_size; /* LSO MaxOffloadSize. */ + u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */ + /* Num Offloaded Connections TCP_IPv4. */ + u32 ipv4_ofld_cnt; + /* Num Offloaded Connections TCP_IPv6. */ + u32 ipv6_ofld_cnt; + u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */ + u32 txq_size; /* TX Descriptors Queue Size */ + u32 rxq_size; /* RX Descriptors Queue Size */ + /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ + u32 txq_avg_depth; + /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ + u32 rxq_avg_depth; + /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ + u32 iov_offload; + /* Number of NetQueue/VMQ Config'd. */ + u32 netq_cnt; + u32 vf_cnt; /* Num VF assigned to this PF. */ +}; + +/* Per PCI Function FCOE Statistics required from the driver */ +struct fcoe_stats_info { + u8 version[12]; /* Function's Driver Version. */ + u8 mac_local[8]; /* Locally Admin Addr. */ + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. 
*/ + /* QoS Priority (per 802.1p). 0-7255 */ + u32 qos_priority; + u32 txq_size; /* FCoE TX Descriptors Queue Size. */ + u32 rxq_size; /* FCoE RX Descriptors Queue Size. */ + /* FCoE TX Descriptor Queue Avg Depth. */ + u32 txq_avg_depth; + /* FCoE RX Descriptors Queue Avg Depth. */ + u32 rxq_avg_depth; + u32 rx_frames_lo; /* FCoE RX Frames received. */ + u32 rx_frames_hi; /* FCoE RX Frames received. */ + u32 rx_bytes_lo; /* FCoE RX Bytes received. */ + u32 rx_bytes_hi; /* FCoE RX Bytes received. */ + u32 tx_frames_lo; /* FCoE TX Frames sent. */ + u32 tx_frames_hi; /* FCoE TX Frames sent. */ + u32 tx_bytes_lo; /* FCoE TX Bytes sent. */ + u32 tx_bytes_hi; /* FCoE TX Bytes sent. */ +}; + +/* Per PCI Function iSCSI Statistics required from the driver*/ +struct iscsi_stats_info { + u8 version[12]; /* Function's Driver Version. */ + u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + /* QoS Priority (per 802.1p). 0-7255 */ + u32 qos_priority; + u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */ + u8 ww_port_name[64]; /* iSCSI World wide port name */ + u8 boot_target_name[64];/* iSCSI Boot Target Name. */ + u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */ + u32 boot_target_portal; /* iSCSI Boot Target Portal. */ + u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ + u32 max_frame_size; /* Max Frame Size. bytes */ + u32 txq_size; /* PDU TX Descriptors Queue Size. */ + u32 rxq_size; /* PDU RX Descriptors Queue Size. */ + u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */ + u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */ + u32 rx_pdus_lo; /* iSCSI PDUs received. */ + u32 rx_pdus_hi; /* iSCSI PDUs received. */ + u32 rx_bytes_lo; /* iSCSI RX Bytes received. */ + u32 rx_bytes_hi; /* iSCSI RX Bytes received. */ + u32 tx_pdus_lo; /* iSCSI PDUs sent. */ + u32 tx_pdus_hi; /* iSCSI PDUs sent. */ + u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ + u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ + u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable. + * 9 nibbles, the position of each nibble + * represents the C-PCP value, the value + * of the nibble = S-PCP value. 
+ */ +}; + +union drv_info_to_mcp { + struct eth_stats_info ether_stat; + struct fcoe_stats_info fcoe_stat; + struct iscsi_stats_info iscsi_stat; +}; #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 0 #define BCM_5710_FW_REVISION_VERSION 29 @@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers { /* - * cfc delete event data + * FCoE RX statistics parameters section#0 */ +struct fcoe_rx_stat_params_section0 { + __le32 fcoe_rx_pkt_cnt; + __le32 fcoe_rx_byte_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 + */ +struct fcoe_rx_stat_params_section1 { + __le32 fcoe_ver_cnt; + __le32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#2 + */ +struct fcoe_rx_stat_params_section2 { + __le32 fc_crc_cnt; + __le32 eofa_del_cnt; + __le32 miss_frame_cnt; + __le32 seq_timeout_cnt; + __le32 drop_seq_cnt; + __le32 fcoe_rx_drop_pkt_cnt; + __le32 fcp_rx_pkt_cnt; + __le32 reserved0; +}; + + +/* + * FCoE TX statistics parameters + */ +struct fcoe_tx_stat_params { + __le32 fcoe_tx_pkt_cnt; + __le32 fcoe_tx_byte_cnt; + __le32 fcp_tx_pkt_cnt; + __le32 reserved0; +}; + +/* + * FCoE statistics parameters + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; + struct fcoe_rx_stat_params_section2 rx_stat2; +}; + + +/* + * cfc delete event data +*/ struct cfc_del_event_data { u32 cid; u32 reserved0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 882f48f0a03..4df9505b67b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -27,7 +27,6 @@ #include "bnx2x.h" #include "bnx2x_cmn.h" - /********************************************************/ #define ETH_HLEN 14 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ @@ -163,6 +162,11 @@ #define EDC_MODE_LIMITING 0x0044 #define EDC_MODE_PASSIVE_DAC 0x0055 +/* BRB default for class 0 E2 */ +#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170 +#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250 +#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10 +#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50 /* BRB thresholds for E2*/ #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 @@ -177,6 +181,12 @@ #define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 #define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 +/* BRB default for class 0 E3A0 */ +#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290 +#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410 +#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10 +#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50 + /* BRB thresholds for E3A0 */ #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 @@ -190,6 +200,11 @@ #define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 +/* BRB default for E3B0 */ +#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330 +#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490 +#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15 +#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55 /* BRB thresholds for E3B0 2 port mode*/ #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 @@ -239,18 +254,29 @@ #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 - /* only for E3B0*/ #define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 #define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 -#define PFC_E3B0_4P_LB_GUART 120 +#define PFC_E3B0_4P_LB_GUART 120 #define 
PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 -#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 -#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 + +/* Pause defines*/ +#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330 +#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490 +#define DEFAULT_E3B0_LB_GUART 40 + +#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40 +#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0 + +#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40 +#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0 +/* ETS defines*/ #define DCBX_INVALID_COS (0xFF) #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) @@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) u32 min_w_val = 0; /* Calculate min_w_val.*/ if (vars->link_up) { - if (SPEED_20000 == vars->line_speed) + if (vars->line_speed == SPEED_20000) min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; else min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; @@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound); - if (0 == port) { + if (!port) { REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, credit_upper_bound); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, @@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0); REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0); - if (0 == port) { + if (!port) { REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); @@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 * port mode port1 has COS0-2 that can be used for WFQ. */ - if (0 == port) { + if (!port) { base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; } else { @@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) * In 2 port mode port0 has COS0-5 that can be used for WFQ. * In 4 port mode port1 has COS0-2 that can be used for WFQ. 
*/ - if (0 == port) { + if (!port) { base_weight = PBF_REG_COS0_WEIGHT_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; } else { @@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, ******************************************************************************/ static int bnx2x_ets_e3b0_get_total_bw( const struct link_params *params, - const struct bnx2x_ets_params *ets_params, + struct bnx2x_ets_params *ets_params, u16 *total_bw) { struct bnx2x *bp = params->bp; u8 cos_idx = 0; + u8 is_bw_cos_exist = 0; *total_bw = 0 ; + /* Calculate total BW requested */ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { - if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { + if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { + is_bw_cos_exist = 1; + if (!ets_params->cos[cos_idx].params.bw_params.bw) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" + "was set to 0\n"); + /* + * This is to prevent a state when ramrods + * can't be sent + */ + ets_params->cos[cos_idx].params.bw_params.bw + = 1; + } *total_bw += ets_params->cos[cos_idx].params.bw_params.bw; } } /* Check total BW is valid */ - if ((100 != *total_bw) || (0 == *total_bw)) { - if (0 == *total_bw) { + if ((is_bw_cos_exist == 1) && (*total_bw != 100)) { + if (*total_bw == 0) { DP(NETIF_MSG_LINK, - "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n"); + "bnx2x_ets_E3B0_config total BW shouldn't be 0\n"); return -EINVAL; } DP(NETIF_MSG_LINK, - "bnx2x_ets_E3B0_config toatl BW should be 100\n"); - /** - * We can handle a case whre the BW isn't 100 this can happen - * if the TC are joined. - */ + "bnx2x_ets_E3B0_config total BW should be 100\n"); + /* + * We can handle a case whre the BW isn't 100 this can happen + * if the TC are joined. + */ } return 0; } @@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, const u8 max_num_of_cos = (port) ? 
DCBX_E3B0_MAX_NUM_COS_PORT1 : DCBX_E3B0_MAX_NUM_COS_PORT0; - if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) { + if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " "parameter There can't be two COS's with " "the same strict pri\n"); @@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, if (pri > max_num_of_cos) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " - "parameter Illegal strict priority\n"); + "parameter Illegal strict priority\n"); return -EINVAL; } @@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, /* Set all the strict priority first */ for (i = 0; i < max_num_of_cos; i++) { - if (DCBX_INVALID_COS != sp_pri_to_cos[i]) { - if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) { + if (sp_pri_to_cos[i] != DCBX_INVALID_COS) { + if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg " "invalid cos entry\n"); @@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, sp_pri_to_cos[i], pri_set); pri_bitmask = 1 << sp_pri_to_cos[i]; /* COS is used remove it from bitmap.*/ - if (0 == (pri_bitmask & cos_bit_to_set)) { + if (!(pri_bitmask & cos_bit_to_set)) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg " "invalid There can't be two COS's with" @@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, ******************************************************************************/ int bnx2x_ets_e3b0_config(const struct link_params *params, const struct link_vars *vars, - const struct bnx2x_ets_params *ets_params) + struct bnx2x_ets_params *ets_params) { struct bnx2x *bp = params->bp; int bnx2x_status = 0; @@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, /* Prepare BW parameters*/ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params, &total_bw); - if (0 != bnx2x_status) { + if (bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed\n"); return -EINVAL; } - /** - * Upper bound is set according to current link speed (min_w_val - * should be the same for upper bound and COS credit val). + /* + * Upper bound is set according to current link speed (min_w_val + * should be the same for upper bound and COS credit val). 
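The reworked bnx2x_ets_e3b0_get_total_bw() shown above reduces to a small rule: a COS entry in BW state may not carry a zero weight (it is bumped to 1 so configuration ramrods can still be sent), and the 100% check only applies when at least one BW entry exists. A condensed standalone sketch of that rule, using plain arrays instead of the driver's structures (illustrative only, not part of the patch):

	#include <stdint.h>

	/* Returns 0 when the requested BW set is usable, -1 when it is invalid. */
	static int ets_total_bw_check(uint16_t bw[], int num_bw_cos, uint16_t *total)
	{
		int i;

		*total = 0;
		for (i = 0; i < num_bw_cos; i++) {
			if (bw[i] == 0)
				bw[i] = 1;	/* a zero weight would block ramrods */
			*total += bw[i];
		}
		if (num_bw_cos && *total != 100) {
			if (*total == 0)
				return -1;	/* nothing to schedule at all */
			/* a sum other than 100 is tolerated (joined TCs), only logged */
		}
		return 0;
	}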
*/ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); @@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { cos_bw_bitmap |= (1 << cos_entry); - /** + /* * The function also sets the BW in HW(not the mappin * yet) */ @@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, "bnx2x_ets_e3b0_config cos state not valid\n"); return -EINVAL; } - if (0 != bnx2x_status) { + if (bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw failed\n"); return bnx2x_status; @@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params, sp_pri_to_cos); - if (0 != bnx2x_status) { + if (bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n"); return bnx2x_status; @@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, cos_sp_bitmap, cos_bw_bitmap); - if (0 != bnx2x_status) { + if (bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n"); return bnx2x_status; } @@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); - if ((0 == total_bw) || - (0 == cos0_bw) || - (0 == cos1_bw)) { + if ((!total_bw) || + (!cos0_bw) || + (!cos1_bw)) { DP(NETIF_MSG_LINK, "Total BW can't be zero\n"); return; } @@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 */ - val = (0 == strict_cos) ? 0x2318 : 0x22E0; + val = (!strict_cos) ? 
0x2318 : 0x22E0; REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); return 0; @@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) /******************************************************************/ /* PFC section */ /******************************************************************/ - static void bnx2x_update_pfc_xmac(struct link_params *params, struct link_vars *vars, u8 is_lb) @@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, if (!vars->link_up) return; - if (MAC_TYPE_EMAC == vars->mac_type) { + if (vars->mac_type == MAC_TYPE_EMAC) { DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, pfc_frames_received); @@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) udelay(40); } +static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) +{ + u32 port4mode_ovwr_val; + /* Check 4-port override enabled */ + port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if (port4mode_ovwr_val & (1<<0)) { + /* Return 4-port mode override value */ + return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); + } + /* Return 4-port mode from input pin */ + return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); +} static void bnx2x_emac_init(struct link_params *params, struct link_vars *vars) @@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params, } -static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) -{ - u32 port4mode_ovwr_val; - /* Check 4-port override enabled */ - port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); - if (port4mode_ovwr_val & (1<<0)) { - /* Return 4-port mode override value */ - return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); - } - /* Return 4-port mode from input pin */ - return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); -} - /* Define the XMAC mode */ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) { struct bnx2x *bp = params->bp; u32 is_port4mode = bnx2x_is_4_port_mode(bp); - /** - * In 4-port mode, need to set the mode only once, so if XMAC is - * already out of reset, it means the mode has already been set, - * and it must not* reset the XMAC again, since it controls both - * ports of the path - **/ + /* + * In 4-port mode, need to set the mode only once, so if XMAC is + * already out of reset, it means the mode has already been set, + * and it must not* reset the XMAC again, since it controls both + * ports of the path + */ if ((CHIP_NUM(bp) == CHIP_NUM_57840) && (REG_RD(bp, MISC_REG_RESET_REG_2) & @@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params, return 0; } + static int bnx2x_emac_enable(struct link_params *params, struct link_vars *vars, u8 lb) { @@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); } - /* PFC BRB internal port configuration params */ struct bnx2x_pfc_brb_threshold_val { u32 pause_xoff; @@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val { }; struct bnx2x_pfc_brb_e3b0_val { + u32 per_class_guaranty_mode; + u32 lb_guarantied_hyst; u32 full_lb_xoff_th; u32 full_lb_xon_threshold; u32 lb_guarantied; @@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val { struct bnx2x_pfc_brb_th_val { struct bnx2x_pfc_brb_threshold_val pauseable_th; struct bnx2x_pfc_brb_threshold_val non_pauseable_th; + struct bnx2x_pfc_brb_threshold_val default_class0; + struct bnx2x_pfc_brb_threshold_val default_class1; + }; static int 
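bnx2x_is_4_port_mode(), moved up in the hunk above so the earlier MAC-init code can call it, follows the usual "override register first, strap pin second" pattern: bit 0 of the override register says whether the override is valid, bit 1 carries the overridden mode, otherwise the strap register is trusted. A minimal sketch with the two register values passed in (the helper name is illustrative, not the driver's):

	#include <stdint.h>

	static uint8_t port4_mode(uint32_t ovwr_val, uint32_t strap_val)
	{
		if (ovwr_val & (1 << 0))			/* override enabled */
			return (ovwr_val & (1 << 1)) == (1 << 1);
		return (uint8_t)strap_val;			/* fall back to the input pin */
	}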
bnx2x_pfc_brb_get_config_params( struct link_params *params, @@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params( { struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); + + config_val->default_class1.pause_xoff = 0; + config_val->default_class1.pause_xon = 0; + config_val->default_class1.full_xoff = 0; + config_val->default_class1.full_xon = 0; + if (CHIP_IS_E2(bp)) { + /* class0 defaults */ + config_val->default_class0.pause_xoff = + DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; + config_val->default_class0.pause_xon = + DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR; + config_val->default_class0.full_xoff = + DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; + config_val->default_class0.full_xon = + DEFAULT0_E2_BRB_MAC_FULL_XON_THR; + /* pause able*/ config_val->pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; config_val->pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; + PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; + PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; /* non pause able*/ config_val->non_pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; config_val->non_pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; + PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; } else if (CHIP_IS_E3A0(bp)) { + /* class0 defaults */ + config_val->default_class0.pause_xoff = + DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; + config_val->default_class0.pause_xon = + DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR; + config_val->default_class0.full_xoff = + DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; + config_val->default_class0.full_xon = + DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; + /* pause able */ config_val->pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; config_val->pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; /* non pause able*/ config_val->non_pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; config_val->non_pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; } else if (CHIP_IS_E3B0(bp)) { + /* class0 defaults */ + config_val->default_class0.pause_xoff = + DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; + config_val->default_class0.pause_xon = + DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR; + config_val->default_class0.full_xoff = + DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR; + config_val->default_class0.full_xon = + DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR; + if (params->phy[INT_PHY].flags & 
FLAGS_4_PORT_MODE) { config_val->pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; config_val->pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; /* non pause able*/ config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; config_val->non_pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else { - config_val->pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else { + config_val->pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } } else return -EINVAL; return 0; } - -static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params, - struct bnx2x_pfc_brb_e3b0_val - *e3b0_val, - u32 cos0_pauseable, - u32 cos1_pauseable) +static void bnx2x_pfc_brb_get_e3b0_config_params( + struct link_params *params, + struct bnx2x_pfc_brb_e3b0_val + *e3b0_val, + struct bnx2x_nig_brb_pfc_port_params *pfc_params, + const u8 pfc_enabled) { - if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) { + if (pfc_enabled && pfc_params) { + e3b0_val->per_class_guaranty_mode = 1; + e3b0_val->lb_guarantied_hyst = 80; + + if (params->phy[INT_PHY].flags & + FLAGS_4_PORT_MODE) { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_4P_BRB_FULL_LB_XON_THR; + e3b0_val->lb_guarantied = + PFC_E3B0_4P_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = 
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; + } else { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_2P_BRB_FULL_LB_XON_THR; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; + + if (pfc_params->cos0_pauseable != + pfc_params->cos1_pauseable) { + /* nonpauseable= Lossy + pauseable = Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_MIX_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; + } else if (pfc_params->cos0_pauseable) { + /* Lossless +Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; + } else { + /* Lossy +Lossy*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_NON_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; + } + } + } else { + e3b0_val->per_class_guaranty_mode = 0; + e3b0_val->lb_guarantied_hyst = 0; e3b0_val->full_lb_xoff_th = - PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; + DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR; e3b0_val->full_lb_xon_threshold = - PFC_E3B0_4P_BRB_FULL_LB_XON_THR; + DEFAULT_E3B0_BRB_FULL_LB_XON_THR; e3b0_val->lb_guarantied = - PFC_E3B0_4P_LB_GUART; + DEFAULT_E3B0_LB_GUART; e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; + DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART; e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; + DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST; e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; + DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART; e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; - } else { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - PFC_E3B0_2P_BRB_FULL_LB_XON_THR; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; - - if (cos0_pauseable != cos1_pauseable) { - /* nonpauseable= Lossy + pauseable = Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_MIX_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; - } else if (cos0_pauseable) { - /* Lossless +Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; - } else { - /* Lossy +Lossy*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_NON_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; - } + DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST; } } static int bnx2x_update_pfc_brb(struct link_params *params, @@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params, struct bnx2x *bp = params->bp; struct bnx2x_pfc_brb_th_val config_val = { {0} }; struct bnx2x_pfc_brb_threshold_val *reg_th_config = - &config_val.pauseable_th; + &config_val.pauseable_th; struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; - int set_pfc = params->feature_config_flags & + const int set_pfc = params->feature_config_flags & 
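In 2-port mode, the guaranteed LB buffer picked by bnx2x_pfc_brb_get_e3b0_config_params() above depends only on whether the two classes are both lossless, both lossy, or mixed. A condensed view of that selection (the PFC_E3B0_2P_* macros are the driver's own; the helper wrapping them is illustrative):

	static u32 lb_guarantied_2p(u32 cos0_pauseable, u32 cos1_pauseable)
	{
		if (cos0_pauseable != cos1_pauseable)
			return PFC_E3B0_2P_MIX_PAUSE_LB_GUART;	/* lossless + lossy */
		if (cos0_pauseable)
			return PFC_E3B0_2P_PAUSE_LB_GUART;	/* both lossless */
		return PFC_E3B0_2P_NON_PAUSE_LB_GUART;		/* both lossy */
	}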
FEATURE_CONFIG_PFC_ENABLED; + const u8 pfc_enabled = (set_pfc && pfc_params); int bnx2x_status = 0; u8 port = params->port; /* default - pause configuration */ reg_th_config = &config_val.pauseable_th; bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); - if (0 != bnx2x_status) + if (bnx2x_status) return bnx2x_status; - if (set_pfc && pfc_params) + if (pfc_enabled) { /* First COS */ - if (!pfc_params->cos0_pauseable) + if (pfc_params->cos0_pauseable) + reg_th_config = &config_val.pauseable_th; + else reg_th_config = &config_val.non_pauseable_th; + } else + reg_th_config = &config_val.default_class0; /* * The number of free blocks below which the pause signal to class 0 * of MAC #n is asserted. n=0,1 @@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params, REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); - if (set_pfc && pfc_params) { + if (pfc_enabled) { /* Second COS */ if (pfc_params->cos1_pauseable) reg_th_config = &config_val.pauseable_th; else reg_th_config = &config_val.non_pauseable_th; + } else + reg_th_config = &config_val.default_class1; + /* + * The number of free blocks below which the pause signal to + * class 1 of MAC #n is asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, + reg_th_config->pause_xoff); + + /* + * The number of free blocks above which the pause signal to + * class 1 of MAC #n is de-asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XON_THRESHOLD_0, + reg_th_config->pause_xon); + /* + * The number of free blocks below which the full signal to + * class 1 of MAC #n is asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : + BRB1_REG_FULL_1_XOFF_THRESHOLD_0, + reg_th_config->full_xoff); + /* + * The number of free blocks above which the full signal to + * class 1 of MAC #n is de-asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : + BRB1_REG_FULL_1_XON_THRESHOLD_0, + reg_th_config->full_xon); + + if (CHIP_IS_E3B0(bp)) { + bnx2x_pfc_brb_get_e3b0_config_params( + params, + &e3b0_val, + pfc_params, + pfc_enabled); + + REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, + e3b0_val.per_class_guaranty_mode); + /* - * The number of free blocks below which the pause signal to - * class 1 of MAC #n is asserted. n=0,1 - **/ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, - reg_th_config->pause_xoff); + * The hysteresis on the guarantied buffer space for the Lb + * port before signaling XON. + */ + REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, + e3b0_val.lb_guarantied_hyst); + /* - * The number of free blocks above which the pause signal to - * class 1 of MAC #n is de-asserted. n=0,1 + * The number of free blocks below which the full signal to the + * LB port is asserted. */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XON_THRESHOLD_0, - reg_th_config->pause_xon); + REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, + e3b0_val.full_lb_xoff_th); /* - * The number of free blocks below which the full signal to - * class 1 of MAC #n is asserted. n=0,1 + * The number of free blocks above which the full signal to the + * LB port is de-asserted. */ - REG_WR(bp, (port) ? 
BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : - BRB1_REG_FULL_1_XOFF_THRESHOLD_0, - reg_th_config->full_xoff); + REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, + e3b0_val.full_lb_xon_threshold); /* - * The number of free blocks above which the full signal to - * class 1 of MAC #n is de-asserted. n=0,1 + * The number of blocks guarantied for the MAC #n port. n=0,1 */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : - BRB1_REG_FULL_1_XON_THRESHOLD_0, - reg_th_config->full_xon); + /* The number of blocks guarantied for the LB port.*/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED, + e3b0_val.lb_guarantied); - if (CHIP_IS_E3B0(bp)) { - /*Should be done by init tool */ - /* - * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD - * reset value - * 944 - */ - - /** - * The hysteresis on the guarantied buffer space for the Lb port - * before signaling XON. - **/ - REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80); - - bnx2x_pfc_brb_get_e3b0_config_params( - params, - &e3b0_val, - pfc_params->cos0_pauseable, - pfc_params->cos1_pauseable); - /** - * The number of free blocks below which the full signal to the - * LB port is asserted. - */ - REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, - e3b0_val.full_lb_xoff_th); - /** - * The number of free blocks above which the full signal to the - * LB port is de-asserted. - */ - REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, - e3b0_val.full_lb_xon_threshold); - /** - * The number of blocks guarantied for the MAC #n port. n=0,1 - */ - - /*The number of blocks guarantied for the LB port.*/ - REG_WR(bp, BRB1_REG_LB_GUARANTIED, - e3b0_val.lb_guarantied); - - /** - * The number of blocks guarantied for the MAC #n port. - */ - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, - 2 * e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, - 2 * e3b0_val.mac_1_class_t_guarantied); - /** - * The number of blocks guarantied for class #t in MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - /** - * The hysteresis on the guarantied buffer space for class in - * MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - - /** - * The number of blocks guarantied for class #t in MAC1.t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - /** - * The hysteresis on the guarantied buffer space for class #t - * in MAC1. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - - } + /* + * The number of blocks guarantied for the MAC #n port. + */ + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, + 2 * e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, + 2 * e3b0_val.mac_1_class_t_guarantied); + /* + * The number of blocks guarantied for class #t in MAC0. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + /* + * The hysteresis on the guarantied buffer space for class in + * MAC0. 
t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + /* + * The number of blocks guarantied for class #t in MAC1.t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + /* + * The hysteresis on the guarantied buffer space for class #t + * in MAC1. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); } return bnx2x_status; @@ -2515,7 +2619,7 @@ int bnx2x_update_pfc(struct link_params *params, /* update BRB params */ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); - if (0 != bnx2x_status) + if (bnx2x_status) return bnx2x_status; if (!vars->link_up) @@ -2533,7 +2637,6 @@ int bnx2x_update_pfc(struct link_params *params, bnx2x_emac_enable(params, vars, 0); return bnx2x_status; } - if (CHIP_IS_E2(bp)) bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); else @@ -3053,7 +3156,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "write phy register failed\n"); netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; - } else { /* data */ tmp = ((phy->addr << 21) | (devad << 16) | val | @@ -3090,8 +3192,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, EMAC_MDIO_STATUS_10MB); return rc; } - - /******************************************************************/ /* BSC access functions from E3 */ /******************************************************************/ @@ -3339,7 +3439,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params, aer_val = 0x3800 + offset - 1; else aer_val = 0x3800 + offset; - DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, aer_val); @@ -3942,13 +4042,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, struct link_params *params, - u8 fiber_mode) + u8 fiber_mode, + u8 always_autoneg) { struct bnx2x *bp = params->bp; u16 val16, digctrl_kx1, digctrl_kx2; - u8 lane; - - lane = bnx2x_get_warpcore_lane(phy, params); /* Clear XFI clock comp in non-10G single lane mode. 
*/ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -3956,7 +4054,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); - if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { /* SGMII Autoneg */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); @@ -3967,7 +4065,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, } else { bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - val16 &= 0xcfbf; + val16 &= 0xcebf; switch (phy->req_line_speed) { case SPEED_10: break; @@ -4043,9 +4141,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC6, &val); } - - - /* Clear SFI/XFI link settings registers */ +/* Clear SFI/XFI link settings registers */ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, struct link_params *params, u16 lane) @@ -4250,7 +4346,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, vars->phy_flags |= PHY_SGMII_FLAG; DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); bnx2x_warpcore_clear_regs(phy, params, lane); - bnx2x_warpcore_set_sgmii_speed(phy, params, 0); + bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1); } else { switch (serdes_net_if) { case PORT_HW_CFG_NET_SERDES_IF_KR: @@ -4278,7 +4374,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, } bnx2x_warpcore_set_sgmii_speed(phy, params, - fiber_mode); + fiber_mode, + 0); } break; @@ -4291,7 +4388,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, bnx2x_warpcore_set_10G_XFI(phy, params, 0); } else if (vars->line_speed == SPEED_1000) { DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); - bnx2x_warpcore_set_sgmii_speed(phy, params, 1); + bnx2x_warpcore_set_sgmii_speed( + phy, params, 1, 0); } /* Issue Module detection */ if (bnx2x_is_sfp_module_plugged(phy, params)) @@ -4428,12 +4526,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, /* Switch back to 4-copy registers */ bnx2x_set_aer_mmd(params, phy); - /* Global loopback, not recommended. 
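With the extra always_autoneg argument added to bnx2x_warpcore_set_sgmii_speed() above, its two call sites behave differently: the SGMII network-interface path now always autonegotiates, while the 1G-fiber path keeps honoring the requested line speed. The call shapes, annotated (arguments as in the hunks above; the surrounding control flow is omitted):

	/* SGMII net interface: copper mode, autoneg regardless of req_line_speed */
	bnx2x_warpcore_set_sgmii_speed(phy, params, 0 /* fiber_mode */, 1 /* always_autoneg */);

	/* SFI at 1G: fiber mode, requested speed still honored */
	bnx2x_warpcore_set_sgmii_speed(phy, params, 1 /* fiber_mode */, 0 /* always_autoneg */);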
*/ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | - 0x4000); } else { /* 10G & 20G */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4450,25 +4542,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, } -void bnx2x_link_status_update(struct link_params *params, - struct link_vars *vars) +void bnx2x_sync_link(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g_plus; - u8 port = params->port; - u32 sync_offset, media_types; - /* Update PHY configuration */ - set_phy_vars(params, vars); - - vars->link_status = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[port].link_status)); - - vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); - vars->phy_flags = PHY_XGXS_FLAG; if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; - + vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); if (vars->link_up) { DP(NETIF_MSG_LINK, "phy link up\n"); @@ -4563,7 +4644,23 @@ void bnx2x_link_status_update(struct link_params *params, if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; } +} + +void bnx2x_link_status_update(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); + + vars->link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[port].link_status)); + vars->phy_flags = PHY_XGXS_FLAG; + bnx2x_sync_link(params, vars); /* Sync media type */ sync_offset = params->shmem_base + offsetof(struct shmem_region, @@ -4602,7 +4699,6 @@ void bnx2x_link_status_update(struct link_params *params, vars->line_speed, vars->duplex, vars->flow_ctrl); } - static void bnx2x_set_master_ln(struct link_params *params, struct bnx2x_phy *phy) { @@ -4676,11 +4772,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params, * Each two bits represents a lane number: * No swap is 0123 => 0x1b no need to enable the swap */ - u16 ser_lane, rx_lane_swap, tx_lane_swap; + u16 rx_lane_swap, tx_lane_swap; - ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); rx_lane_swap = ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); @@ -5356,7 +5449,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - struct bnx2x *bp = params->bp; u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; @@ -5403,9 +5495,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - struct bnx2x *bp = params->bp; - u8 lane; u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; int rc = 0; @@ -6678,7 +6768,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) return rc; } - /*****************************************************************************/ /* External Phy section */ /*****************************************************************************/ @@ -8103,7 +8192,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params, static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { + struct bnx2x *bp = params->bp; 
bnx2x_warpcore_power_module(params, phy, 0); + /* Put Warpcore in low power mode */ + REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); + + /* Put LCPLL in low power mode */ + REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1); + REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0); + REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0); } static void bnx2x_power_sfp_module(struct link_params *params, @@ -9040,13 +9137,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8727 Power fault has been detected on port %d\n", oc_port); - netdev_err(bp->dev, "Error: Power fault on Port %d has" - " been detected and the power to " - "that SFP+ module has been removed" - " to prevent failure of the card." - " Please remove the SFP+ module and" - " restart the system to clear this" - " error.\n", + netdev_err(bp->dev, "Error: Power fault on Port %d has " + "been detected and the power to " + "that SFP+ module has been removed " + "to prevent failure of the card. " + "Please remove the SFP+ module and " + "restart the system to clear this " + "error.\n", oc_port); /* Disable all RX_ALARMs except for mod_abs */ bnx2x_cl45_write(bp, phy, @@ -9228,7 +9325,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val; + u16 val, offset; /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, @@ -9263,14 +9360,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, MDIO_PMA_REG_8481_LED3_BLINK, 0); - bnx2x_cl45_read(bp, phy, + /* Configure the blink rate to ~15.9 Hz */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); - val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ + MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, + MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ); + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; + else + offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, offset, &val); + val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); + MDIO_PMA_DEVAD, offset, val); /* 'Interrupt Mask' */ bnx2x_cl45_write(bp, phy, @@ -9283,7 +9388,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 autoneg_val, an_1000_val, an_10_100_val; + u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; u16 tmp_req_line_speed; tmp_req_line_speed = phy->req_line_speed; @@ -9378,6 +9483,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, (1<<15 | 1<<9 | 7<<0)); + /* The PHY needs this set even for forced link. 
*/ + an_10_100_val |= (1<<8) | (1<<7); DP(NETIF_MSG_LINK, "Setting 100M force\n"); } if ((phy->req_line_speed == SPEED_10) && @@ -9415,9 +9522,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Advertising 10G\n"); /* Restart autoneg for 10G*/ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + &an_10g_val); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + an_10g_val | 0x1000); bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, - 0x3200); + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, + 0x3200); } else bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -9449,74 +9564,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, return bnx2x_848xx_cmn_config_init(phy, params, vars); } - -#define PHY84833_HDSHK_WAIT 300 -static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, +#define PHY84833_CMDHDLR_WAIT 300 +#define PHY84833_CMDHDLR_MAX_ARGS 5 +static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars) + u16 fw_cmd, + u16 cmd_args[]) { u32 idx; - u32 pair_swap; u16 val; - u16 data; struct bnx2x *bp = params->bp; - /* Do pair swap */ - - /* Check for configuration. */ - pair_swap = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & - PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; - - if (pair_swap == 0) - return 0; - - data = (u16)pair_swap; - /* Write CMD_OPEN_OVERRIDE to STATUS reg */ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, - PHY84833_CMD_OPEN_OVERRIDE); - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + MDIO_84833_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_OPEN_OVERRIDE); + for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if (val == PHY84833_CMD_OPEN_FOR_CMDS) + MDIO_84833_CMD_HDLR_STATUS, &val); + if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; msleep(1); } - if (idx >= PHY84833_HDSHK_WAIT) { - DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n"); + if (idx >= PHY84833_CMDHDLR_WAIT) { + DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); return -EINVAL; } + /* Prepare argument(s) and issue command */ + for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG4, - data); - /* Issue pair swap command */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG0, - PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE); - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if ((val == PHY84833_CMD_COMPLETE_PASS) || - (val == PHY84833_CMD_COMPLETE_ERROR)) + MDIO_84833_CMD_HDLR_STATUS, &val); + if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; msleep(1); } - if ((idx >= PHY84833_HDSHK_WAIT) || - (val == PHY84833_CMD_COMPLETE_ERROR)) { - DP(NETIF_MSG_LINK, "Pairswap: override failed.\n"); + if ((idx >= PHY84833_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "FW cmd failed.\n"); return -EINVAL; } + /* Gather returning data */ + for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + 
MDIO_84833_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, - PHY84833_CMD_CLEAR_COMPLETE); - DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data); + MDIO_84833_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); return 0; } +static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 pair_swap; + u16 data[PHY84833_CMDHDLR_MAX_ARGS]; + int status; + struct bnx2x *bp = params->bp; + + /* Check for configuration. */ + pair_swap = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return 0; + + /* Only the second argument is used for this command */ + data[1] = (u16)pair_swap; + + status = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_PAIR_SWAP, data); + if (status == 0) + DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); + + return status; +} + static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, u32 shmem_base_path[], u32 chip_id) @@ -9579,24 +9715,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, return 0; } -static int bnx2x_84833_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 chip_id) -{ - u8 reset_gpios; - - reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); - - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); - udelay(10); - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); - msleep(800); - DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", - reset_gpios); - - return 0; -} - #define PHY84833_CONSTANT_LATENCY 1193 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_params *params, @@ -9605,8 +9723,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u8 port, initialize = 1; u16 val; - u16 temp; - u32 actual_phy_selection, cms_enable, idx; + u32 actual_phy_selection, cms_enable; + u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; int rc = 0; msleep(1); @@ -9625,6 +9743,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x8000); + } + + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait for GPHY to come out of reset */ + msleep(50); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { /* Bring PHY out of super isolate mode */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, @@ -9633,26 +9758,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - } - - bnx2x_wait_reset_complete(bp, phy, params); - - /* Wait for GPHY to come out of reset */ - msleep(50); - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) bnx2x_84833_pair_swap_cfg(phy, params, vars); - - /* - * BCM84823 requires that XGXS links up first @ 10G for normal behavior - */ - temp = vars->line_speed; - vars->line_speed = SPEED_10000; - bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); - bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); - vars->line_speed = temp; - - /* Set dual-media configuration according to configuration */ + } else { + /* + * BCM84823 requires that XGXS links up first @ 10G for normal + * behavior. 
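The new bnx2x_84833_cmd_hdlr() above replaces the open-coded pair-swap (and, further down, EEE) handshakes with one generic firmware mailbox: wait for the handler to report itself open, write up to five arguments, issue the command, poll for PASS/ERROR, read the arguments back in place, then clear the status. The skeleton below strips out the MDIO plumbing; the register offsets and status codes are placeholder values (the real ones are the MDIO_84833_CMD_HDLR_* and PHY84833_STATUS_* constants used above), and the register-access mock exists only to keep the sketch self-contained:

	#include <stdint.h>

	enum {					/* placeholder offsets/codes, not the real ones */
		REG_STATUS = 1, REG_COMMAND = 2, REG_DATA1 = 3,
		ST_OPEN_OVERRIDE = 0xa0, ST_OPEN_FOR_CMDS = 0xa1,
		ST_COMPLETE_PASS = 0xa2, ST_COMPLETE_ERROR = 0xa3, ST_CLEAR_COMPLETE = 0,
		WAIT_MS = 300, MAX_ARGS = 5,	/* PHY84833_CMDHDLR_WAIT / _MAX_ARGS */
	};

	static uint16_t regs[16];			/* mock register file */
	static uint16_t rd(int r)             { return regs[r]; }
	static void     wr(int r, uint16_t v) { regs[r] = v; }
	static void     sleep_ms(int ms)      { (void)ms; }

	static int fw_cmd_hdlr(uint16_t cmd, uint16_t args[MAX_ARGS])
	{
		int i;
		uint16_t st = 0;

		wr(REG_STATUS, ST_OPEN_OVERRIDE);	/* ask firmware to open the handler */
		for (i = 0; i < WAIT_MS; i++) {
			st = rd(REG_STATUS);
			if (st == ST_OPEN_FOR_CMDS)
				break;
			sleep_ms(1);
		}
		if (st != ST_OPEN_FOR_CMDS)
			return -1;			/* firmware never became ready */

		for (i = 0; i < MAX_ARGS; i++)		/* arguments first, ... */
			wr(REG_DATA1 + i, args[i]);
		wr(REG_COMMAND, cmd);			/* ...then the command itself */

		for (i = 0; i < WAIT_MS; i++) {
			st = rd(REG_STATUS);
			if (st == ST_COMPLETE_PASS || st == ST_COMPLETE_ERROR)
				break;
			sleep_ms(1);
		}
		if (st != ST_COMPLETE_PASS)
			return -1;			/* timed out or firmware error */

		for (i = 0; i < MAX_ARGS; i++)		/* results come back in place */
			args[i] = rd(REG_DATA1 + i);
		wr(REG_STATUS, ST_CLEAR_COMPLETE);	/* release the handler */
		return 0;
	}

With this in place, bnx2x_84833_pair_swap_cfg() becomes a thin wrapper that puts the swap value in the second argument slot and issues PHY84833_CMD_SET_PAIR_SWAP, as the hunk above shows.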
+ */ + u16 temp; + temp = vars->line_speed; + vars->line_speed = SPEED_10000; + bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); + bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); + vars->line_speed = temp; + } bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_CTL_REG_84823_MEDIA, &val); @@ -9700,64 +9818,18 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* AutogrEEEn */ if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) { - /* Ensure that f/w is ready */ - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if (val == PHY84833_CMD_OPEN_FOR_CMDS) - break; - usleep_range(1000, 1000); - } - if (idx >= PHY84833_HDSHK_WAIT) { - DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n"); - return -EINVAL; - } - - /* Select EEE mode */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG3, - 0x2); - - /* Set Idle and Latency */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG4, - PHY84833_CONSTANT_LATENCY + 1); - - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_DATA3_REG, - PHY84833_CONSTANT_LATENCY + 1); - - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_DATA4_REG, - PHY84833_CONSTANT_LATENCY); - - /* Send EEE instruction to command register */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG0, - PHY84833_DIAG_CMD_SET_EEE_MODE); - - /* Ensure that the command has completed */ - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if ((val == PHY84833_CMD_COMPLETE_PASS) || - (val == PHY84833_CMD_COMPLETE_ERROR)) - break; - usleep_range(1000, 1000); - } - if ((idx >= PHY84833_HDSHK_WAIT) || - (val == PHY84833_CMD_COMPLETE_ERROR)) { - DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n"); - return -EINVAL; - } - - /* Reset command handler */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, - PHY84833_CMD_CLEAR_COMPLETE); - } + FEATURE_CONFIG_AUTOGREEEN_ENABLED) + cmd_args[0] = 0x2; + else + cmd_args[0] = 0x0; + cmd_args[1] = 0x0; + cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; + cmd_args[3] = PHY84833_CONSTANT_LATENCY; + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, cmd_args); + if (rc != 0) + DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); if (initialize) rc = bnx2x_848xx_cmn_config_init(phy, params, vars); else @@ -10144,8 +10216,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "54618SE cfg init\n"); usleep_range(1000, 1000); - /* This works with E3 only, no need to check the chip - before determining the port. */ + /* + * This works with E3 only, no need to check the chip + * before determining the port. 
+ */ port = params->port; cfg_pin = (REG_RD(bp, params->shmem_base + @@ -11218,7 +11292,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, offsetof(struct shmem_region, dev_info.port_feature_config[port].link_config)) & PORT_FEATURE_CONNECTED_SWITCH_MASK); - chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16; + chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); + DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id); if (USES_WARPCORE(bp)) { u32 serdes_net_if; @@ -11397,6 +11473,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, return -EINVAL; default: *phy = phy_null; + /* In case external PHY wasn't found */ + if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + return -EINVAL; return 0; } @@ -11570,7 +11650,7 @@ u32 bnx2x_phy_selection(struct link_params *params) int bnx2x_phy_probe(struct link_params *params) { - u8 phy_index, actual_phy_idx, link_cfg_idx; + u8 phy_index, actual_phy_idx; u32 phy_config_swapped, sync_offset, media_types; struct bnx2x *bp = params->bp; struct bnx2x_phy *phy; @@ -11581,7 +11661,6 @@ int bnx2x_phy_probe(struct link_params *params) for (phy_index = INT_PHY; phy_index < MAX_PHYS; phy_index++) { - link_cfg_idx = LINK_CONFIG_IDX(phy_index); actual_phy_idx = phy_index; if (phy_config_swapped) { if (phy_index == EXT_PHY1) @@ -12247,6 +12326,63 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp, return 0; } +static int bnx2x_84833_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], + u8 phy_index, + u32 chip_id) +{ + u8 reset_gpios; + struct bnx2x_phy phy; + u32 shmem_base, shmem2_base, cnt; + s8 port = 0; + u16 val; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); + DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", + reset_gpios); + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + /* This PHY is for E2 and E3. */ + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + /* Extract the ext phy address for the port */ + if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, + 0, &phy) != + 0) { + DP(NETIF_MSG_LINK, "populate_phy failed\n"); + return -EINVAL; + } + + /* Wait for FW completing its initialization. */ + for (cnt = 0; cnt < 1000; cnt++) { + bnx2x_cl45_read(bp, &phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, &val); + if (!(val & (1<<15))) + break; + msleep(1); + } + if (cnt >= 1000) + DP(NETIF_MSG_LINK, + "84833 Cmn reset timeout (%d)\n", port); + + /* Put the port in super isolate mode. */ + bnx2x_cl45_read(bp, &phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); + val |= MDIO_84833_SUPER_ISOLATE; + bnx2x_cl45_write(bp, &phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + } + + return 0; +} + + static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, u32 ext_phy_type, u32 chip_id) @@ -12281,7 +12417,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], * GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. 
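bnx2x_84833_common_init_phy(), added in this hunk series, pulses the shared reset GPIOs and then, per port, waits for the PHY firmware to finish coming up before forcing the port into super-isolate mode. The "wait for firmware" part is a simple poll on the PMA control register's soft-reset bit; a minimal sketch with the accessor passed in so the snippet stands alone (names are illustrative):

	#include <stdint.h>

	/* Returns 0 once bit 15 of the PMA control register self-clears, -1 on timeout. */
	static int wait_pma_reset_done(uint16_t (*rd_pma_ctrl)(void),
				       void (*sleep_ms)(int ms), int tries_ms)
	{
		int cnt;

		for (cnt = 0; cnt < tries_ms; cnt++) {
			if (!(rd_pma_ctrl() & (1 << 15)))	/* firmware is up */
				return 0;
			sleep_ms(1);
		}
		return -1;	/* the driver only logs this case and carries on */
	}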
*/ - rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id); + rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: rc = -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 2a46e633abe..e02a68a7fb8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); /* Configure the COS to ETS according to BW and SP settings.*/ int bnx2x_ets_e3b0_config(const struct link_params *params, const struct link_vars *vars, - const struct bnx2x_ets_params *ets_params); + struct bnx2x_ets_params *ets_params); /* Read pfc statistic*/ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, u32 pfc_frames_sent[2], diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2f6361e949f..ffeaaa95ed9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } -/* returns func by VN for current port */ -static inline int func_by_vn(struct bnx2x *bp, int vn) -{ - return 2 * vn + BP_PORT(bp); -} - static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) { struct rate_shaping_vars_per_vn m_rs_vn; @@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) "rate shaping and fairness are disabled\n"); } -static inline void bnx2x_link_sync_notify(struct bnx2x *bp) -{ - int func; - int vn; - - /* Set the attention towards other drivers on the same port */ - for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { - if (vn == BP_VN(bp)) - continue; - - func = func_by_vn(bp, vn); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + - (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); - } -} - /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp) if (bp->state != BNX2X_STATE_OPEN) return; + /* read updated dcb configuration */ + bnx2x_dcbx_pmf_update(bp); + bnx2x_link_status_update(&bp->link_params, &bp->link_vars); if (bp->link_vars.link_up) @@ -2643,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) return rc; } -static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) -{ -#ifdef BCM_CNIC - /* Statistics are not supported for CNIC Clients at the moment */ - if (IS_FCOE_FP(fp)) - return false; -#endif - return true; -} void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { @@ -2695,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, * parent connection). The statistics are zeroed when the parent * connection is initialized. */ - if (stat_counter_valid(bp, fp)) { - __set_bit(BNX2X_Q_FLG_STATS, &flags); - if (zero_stats) - __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); - } + + __set_bit(BNX2X_Q_FLG_STATS, &flags); + if (zero_stats) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + return flags; } @@ -2808,8 +2780,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, /* This should be a maximum number of data bytes that may be * placed on the BD (not including paddings). 
*/ - rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - - IP_HEADER_ALIGNMENT_PADDING; + rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - + BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; @@ -2940,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) */ } +#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 + +static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) +{ + struct eth_stats_info *ether_stat = + &bp->slowpath->drv_info_to_mcp.ether_stat; + + /* leave last char as NULL */ + memcpy(ether_stat->version, DRV_MODULE_VERSION, + ETH_STAT_INFO_VERSION_LEN - 1); + + bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local); + + ether_stat->mtu_size = bp->dev->mtu; + + if (bp->dev->features & NETIF_F_RXCSUM) + ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; + if (bp->dev->features & NETIF_F_TSO) + ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; + ether_stat->feature_flags |= bp->common.boot_mode; + + ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; + + ether_stat->txq_size = bp->tx_ring_size; + ether_stat->rxq_size = bp->rx_ring_size; +} + +static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) +{ +#ifdef BCM_CNIC + struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; + struct fcoe_stats_info *fcoe_stat = + &bp->slowpath->drv_info_to_mcp.fcoe_stat; + + memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN); + + fcoe_stat->qos_priority = + app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; + + /* insert FCoE stats from ramrod response */ + if (!NO_FCOE(bp)) { + struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX]. + tstorm_queue_statistics; + + struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX]. 
+ xstorm_queue_statistics; + + struct fcoe_statistics_params *fw_fcoe_stat = + &bp->fw_stats_data->fcoe; + + ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + + ADD_64(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + + ADD_64(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + + ADD_64(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + + ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + + ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); + + ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_mcast_pkts); + + ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + + ADD_64(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + + ADD_64(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + + ADD_64(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + + ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + + ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); + + ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); + + ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); + } + + /* ask L5 driver to add data to the struct */ + bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); +#endif +} + +static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) +{ +#ifdef BCM_CNIC + struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; + struct iscsi_stats_info *iscsi_stat = + &bp->slowpath->drv_info_to_mcp.iscsi_stat; + + memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); + + iscsi_stat->qos_priority = + app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; + + /* ask L5 driver to add data to the struct */ + bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); +#endif +} + /* called due to MCP event (on pmf): * reread new bandwidth configuration * configure FW @@ -2960,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp) bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } +static void bnx2x_handle_drv_info_req(struct bnx2x *bp) +{ + enum drv_info_opcode op_code; + u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); + + /* if drv_info version supported by MFW doesn't match - send NACK */ + if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { + bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> + DRV_INFO_CONTROL_OP_CODE_SHIFT; + + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + + switch (op_code) { + case ETH_STATS_OPCODE: + bnx2x_drv_info_ether_stat(bp); + break; + case FCOE_STATS_OPCODE: + 
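The FCoE byte and frame totals above are folded together with ADD_64(), the driver's carry-propagating add over counters kept as separate hi/lo 32-bit halves. Its effect matches the small helper below (the macro itself lives in the bnx2x headers; this restatement is from memory and purely illustrative):

	#include <stdint.h>

	/* s += a, with both 64-bit values stored as (hi, lo) 32-bit halves */
	static void add_64(uint32_t *s_hi, uint32_t a_hi, uint32_t *s_lo, uint32_t a_lo)
	{
		*s_lo += a_lo;
		*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* carry from the low half */
	}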
bnx2x_drv_info_fcoe_stat(bp); + break; + case ISCSI_STATS_OPCODE: + bnx2x_drv_info_iscsi_stat(bp); + break; + default: + /* if op code isn't supported - send NACK */ + bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + /* if we got drv_info attn from MFW then these fields are defined in + * shmem2 for sure + */ + SHMEM2_WR(bp, drv_info_host_addr_lo, + U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); + SHMEM2_WR(bp, drv_info_host_addr_hi, + U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); + + bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); +} + static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) { DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); @@ -3318,6 +3471,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp) netdev_err(bp->dev, "Fan Failure on Network Controller has caused" " the driver to shutdown the card to prevent permanent" " damage. Please contact OEM Support for assistance\n"); + + /* + * Scheudle device reset (unload) + * This is due to some boards consuming sufficient power when driver is + * up to overheat if fan fails. + */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -3456,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) if (val & DRV_STATUS_SET_MF_BW) bnx2x_set_mf_bw(bp); + if (val & DRV_STATUS_DRV_INFO_REQ) + bnx2x_handle_drv_info_req(bp); if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -5247,7 +5413,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) u8 cos; unsigned long q_type = 0; u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; - + fp->rx_queue = fp_idx; fp->cid = fp_idx; fp->cl_id = bnx2x_fp_cl_id(fp); fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); @@ -6856,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp) static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) { int num_groups; + int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; - /* number of eth_queues */ - u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); + /* number of queues for statistics is number of eth queues + FCoE */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; /* Total number of FW statistics requests = - * 1 for port stats + 1 for PF stats + num_eth_queues */ - bp->fw_stats_num = 2 + num_queue_stats; + * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats + + * num of queues + */ + bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; /* Request is built from stats_query_header and an array of @@ -6870,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) * STATS_QUERY_CMD_COUNT rules. The real number or requests is * configured in the stats_query_header. */ - num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + - (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); + num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) + + (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0); bp->fw_stats_req_sz = sizeof(struct stats_query_header) + num_groups * sizeof(struct stats_query_cmd_group); @@ -6880,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) * * stats_counter holds per-STORM counters that are incremented * when STORM has finished with the current request. + * + * memory for FCoE offloaded statistics are counted anyway, + * even if they will not be sent. 
*/ bp->fw_stats_data_sz = sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + + sizeof(struct fcoe_statistics_params) + sizeof(struct per_queue_stats) * num_queue_stats + sizeof(struct stats_counter); @@ -7025,6 +7198,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { unsigned long ramrod_flags = 0; +#ifdef BCM_CNIC + if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) { + DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n"); + return 0; + } +#endif + DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); @@ -8522,6 +8702,17 @@ sp_rtnl_not_reset: if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); + /* + * in case of fan failure we need to reset id if the "stop on error" + * debug flag is set, since we trying to prevent permanent overheating + * damage + */ + if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n"); + netif_device_detach(bp->dev); + bnx2x_close(bp->dev); + } + sp_rtnl_exit: rtnl_unlock(); } @@ -8708,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) { - u32 val, val2, val3, val4, id; + u32 val, val2, val3, val4, id, boot_mode; u16 pmc; /* Get the chip revision id and number. */ @@ -8817,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? + BC_SUPPORTS_PFC_STATS : 0; + + boot_mode = SHMEM_RD(bp, + dev_info.port_feature_config[BP_PORT(bp)].mba_config) & + PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; + switch (boot_mode) { + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; + break; + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; + break; + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; + break; + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; + break; + } pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; @@ -9267,22 +9478,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->common.shmem2_base); } -#ifdef BCM_CNIC -static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +void bnx2x_get_iscsi_info(struct bnx2x *bp) { +#ifdef BCM_CNIC int port = BP_PORT(bp); - int func = BP_ABS_FUNC(bp); u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_iscsi_conn); - u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, - drv_lic_key[port].max_fcoe_conn); - /* Get the number of maximum allowed iSCSI and FCoE connections */ + /* Get the number of maximum allowed iSCSI connections */ bp->cnic_eth_dev.max_iscsi_conn = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; + BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", + bp->cnic_eth_dev.max_iscsi_conn); + + /* + * If maximum allowed number of connections is zero - + * disable the feature. 
+ */ + if (!bp->cnic_eth_dev.max_iscsi_conn) + bp->flags |= NO_ISCSI_FLAG; +#else + bp->flags |= NO_ISCSI_FLAG; +#endif +} + +static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) +{ +#ifdef BCM_CNIC + int port = BP_PORT(bp); + int func = BP_ABS_FUNC(bp); + + u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn); + + /* Get the number of maximum allowed FCoE connections */ bp->cnic_eth_dev.max_fcoe_conn = (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> BNX2X_MAX_FCOE_INIT_CONN_SHIFT; @@ -9334,21 +9566,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) } } - BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n", - bp->cnic_eth_dev.max_iscsi_conn, - bp->cnic_eth_dev.max_fcoe_conn); + BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); /* * If maximum allowed number of connections is zero - * disable the feature. */ - if (!bp->cnic_eth_dev.max_iscsi_conn) - bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; - if (!bp->cnic_eth_dev.max_fcoe_conn) bp->flags |= NO_FCOE_FLAG; -} +#else + bp->flags |= NO_FCOE_FLAG; #endif +} + +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +{ + /* + * iSCSI may be dynamically disabled but reading + * info here we will decrease memory usage by driver + * if the feature is disabled for good + */ + bnx2x_get_iscsi_info(bp); + bnx2x_get_fcoe_info(bp); +} static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) { @@ -9374,7 +9614,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); #ifdef BCM_CNIC - /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + /* + * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or * FCoE MAC then the appropriate feature should be disabled. */ if (IS_MF_SI(bp)) { @@ -9396,11 +9637,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) val = MF_CFG_RD(bp, func_ext_config[func]. 
fcoe_mac_addr_lower); bnx2x_set_mac_buf(fip_mac, val, val2); - BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n", + BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", fip_mac); } else bp->flags |= NO_FCOE_FLAG; + } else { /* SD mode */ + if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) { + /* use primary mac as iscsi mac */ + memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); + /* Zero primary MAC configuration */ + memset(bp->dev->dev_addr, 0, ETH_ALEN); + + BNX2X_DEV_INFO("SD ISCSI MODE\n"); + BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", + iscsi_mac); + } } #endif } else { @@ -9449,7 +9701,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) } #endif - if (!is_valid_ether_addr(bp->dev->dev_addr)) + if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) dev_err(&bp->pdev->dev, "bad Ethernet MAC address configuration: " "%pM, change it manually before bringing up " @@ -9661,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) /* Get MAC addresses */ bnx2x_get_mac_hwinfo(bp); -#ifdef BCM_CNIC bnx2x_get_cnic_info(bp); -#endif /* Get current FW pulse sequence */ if (!BP_NOMCP(bp)) { @@ -9681,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) { int cnt, i, block_end, rodi; - char vpd_data[BNX2X_VPD_LEN+1]; + char vpd_start[BNX2X_VPD_LEN+1]; char str_id_reg[VENDOR_ID_LEN+1]; char str_id_cap[VENDOR_ID_LEN+1]; + char *vpd_data; + char *vpd_extended_data = NULL; u8 len; - cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); if (cnt < BNX2X_VPD_LEN) goto out_not_found; - i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, + /* VPD RO tag should be first tag after identifier string, hence + * we should be able to find it in first BNX2X_VPD_LEN chars + */ + i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; - block_end = i + PCI_VPD_LRDT_TAG_SIZE + - pci_vpd_lrdt_size(&vpd_data[i]); + pci_vpd_lrdt_size(&vpd_start[i]); i += PCI_VPD_LRDT_TAG_SIZE; - if (block_end > BNX2X_VPD_LEN) - goto out_not_found; + if (block_end > BNX2X_VPD_LEN) { + vpd_extended_data = kmalloc(block_end, GFP_KERNEL); + if (vpd_extended_data == NULL) + goto out_not_found; + + /* read rest of vpd image into vpd_extended_data */ + memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); + cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, + block_end - BNX2X_VPD_LEN, + vpd_extended_data + BNX2X_VPD_LEN); + if (cnt < (block_end - BNX2X_VPD_LEN)) + goto out_not_found; + vpd_data = vpd_extended_data; + } else + vpd_data = vpd_start; + + /* now vpd_data holds full vpd content in both cases */ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, PCI_VPD_RO_KEYWORD_MFR_ID); @@ -9736,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) bp->fw_ver[len] = ' '; } } + kfree(vpd_extended_data); return; } out_not_found: + kfree(vpd_extended_data); return; } @@ -9840,15 +10111,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) bp->multi_mode = multi_mode; + bp->disable_tpa = disable_tpa; + +#ifdef BCM_CNIC + bp->disable_tpa |= IS_MF_ISCSI_SD(bp); +#endif + /* Set TPA flags */ - if (disable_tpa) { + if (bp->disable_tpa) { bp->flags &= ~TPA_ENABLE_FLAG; bp->dev->features &= ~NETIF_F_LRO; } else { bp->flags |= TPA_ENABLE_FLAG; bp->dev->features |= NETIF_F_LRO; } - bp->disable_tpa = disable_tpa; if (CHIP_IS_E1(bp)) bp->dropless_fc = 0; @@ -9965,7 +10241,7 @@ static int bnx2x_open(struct net_device *dev) } 
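The reworked bnx2x_read_fwinfo() above reads a fixed-size VPD prefix with pci_read_vpd() and only allocates a larger buffer (vpd_extended_data) when the read-only block declares a length that runs past that prefix, then reads just the remainder. A minimal standalone sketch of that two-pass pattern follows; the names, sizes and the in-memory read_vpd() helper are illustrative stand-ins, not the driver's code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VPD_PREFIX_LEN 16	/* stands in for BNX2X_VPD_LEN; value is illustrative */

/* Fake VPD image; the contents are irrelevant for this sketch. */
static const unsigned char device_vpd[64];

/* Stand-in for pci_read_vpd(): copy len bytes starting at off. */
static int read_vpd(size_t off, size_t len, unsigned char *buf)
{
	if (off + len > sizeof(device_vpd))
		return -1;
	memcpy(buf, device_vpd + off, len);
	return (int)len;
}

int main(void)
{
	unsigned char prefix[VPD_PREFIX_LEN];
	unsigned char *vpd;
	size_t block_end = 40;	/* pretend the RO block ends past the prefix */

	if (read_vpd(0, VPD_PREFIX_LEN, prefix) < 0)
		return 1;

	if (block_end <= VPD_PREFIX_LEN)
		return 0;	/* the prefix already holds the whole block */

	/* Same idea as vpd_extended_data: keep the prefix, grow the buffer,
	 * and read only the remainder from the device. */
	vpd = malloc(block_end);
	if (!vpd)
		return 1;
	memcpy(vpd, prefix, VPD_PREFIX_LEN);
	if (read_vpd(VPD_PREFIX_LEN, block_end - VPD_PREFIX_LEN,
		     vpd + VPD_PREFIX_LEN) < 0) {
		free(vpd);
		return 1;
	}
	printf("full RO block: %zu bytes\n", block_end);
	free(vpd);
	return 0;
}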
/* called with rtnl_lock */ -static int bnx2x_close(struct net_device *dev) +int bnx2x_close(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -10119,6 +10395,11 @@ void bnx2x_set_rx_mode(struct net_device *dev) } bp->rx_mode = rx_mode; +#ifdef BCM_CNIC + /* handle ISCSI SD mode */ + if (IS_MF_ISCSI_SD(bp)) + bp->rx_mode = BNX2X_RX_MODE_NONE; +#endif /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { @@ -10198,6 +10479,15 @@ static void poll_bnx2x(struct net_device *dev) } #endif +static int bnx2x_validate_addr(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) + return -EADDRNOTAVAIL; + return 0; +} + static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -10205,7 +10495,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_select_queue = bnx2x_select_queue, .ndo_set_rx_mode = bnx2x_set_rx_mode, .ndo_set_mac_address = bnx2x_change_mac_addr, - .ndo_validate_addr = eth_validate_addr, + .ndo_validate_addr = bnx2x_validate_addr, .ndo_do_ioctl = bnx2x_ioctl, .ndo_change_mtu = bnx2x_change_mtu, .ndo_fix_features = bnx2x_fix_features, @@ -10823,8 +11113,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); #ifdef BCM_CNIC - /* disable FCOE L2 queue for E1x and E3*/ - if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp)) + /* disable FCOE L2 queue for E1x */ + if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG; #endif @@ -11486,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) smp_mb__after_atomic_inc(); break; } + case DRV_CTL_ULP_REGISTER_CMD: { + int ulp_type = ctl->data.ulp_type; + + if (CHIP_IS_E3(bp)) { + int idx = BP_FW_MB_IDX(bp); + u32 cap; + + cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); + if (ulp_type == CNIC_ULP_ISCSI) + cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; + else if (ulp_type == CNIC_ULP_FCOE) + cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; + SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); + } + break; + } + case DRV_CTL_ULP_UNREGISTER_CMD: { + int ulp_type = ctl->data.ulp_type; + + if (CHIP_IS_E3(bp)) { + int idx = BP_FW_MB_IDX(bp); + u32 cap; + + cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); + if (ulp_type == CNIC_ULP_ISCSI) + cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; + else if (ulp_type == CNIC_ULP_FCOE) + cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; + SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); + } + break; + } default: BNX2X_ERR("unknown command %x\n", ctl->cmd); @@ -11561,7 +11883,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) mutex_lock(&bp->cnic_mutex); cp->drv_state = 0; - rcu_assign_pointer(bp->cnic_ops, NULL); + RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); kfree(bp->cnic_kwq); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index e58073ef33b..44609de4e5d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -160,8 +160,11 @@ #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c /* [RW 10] Write client 0: Assert pause threshold. */ #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 -#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c -/* [R 24] The number of full blocks occupied by port. */ +/* [RW 1] Indicates if to use per-class guaranty mode (new mode) or per-MAC + * guaranty mode (backwards-compatible mode). 
0=per-MAC guaranty mode (BC + * mode). 1=per-class guaranty mode (new mode). */ +#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268 +/* [R 24] The number of full blocks occpied by port. */ #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 /* [RW 1] Reset the design by software. */ #define BRB1_REG_SOFT_RESET 0x600dc @@ -1619,6 +1622,14 @@ register bits. */ #define MISC_REG_LCPLL_CTRL_1 0xa2a4 #define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8 +/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR + * reset. */ +#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74 +/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */ +#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78 +/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR + * reset. */ +#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c /* [RW 4] Interrupt mask register #0 read/write */ #define MISC_REG_MISC_INT_MASK 0xa388 /* [RW 1] Parity mask register #0 read/write */ @@ -1754,6 +1765,7 @@ * is compared to the value on ctrl_md_devad. Drives output * misc_xgxs0_phy_addr. Global register. */ #define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc +#define MISC_REG_WC0_RESET 0xac30 /* [RW 2] XMAC Core port mode. Indicates the number of ports on the system side. This should be less than or equal to phy_port_mode; if some of the ports are not used. This enables reduction of frequency on the core side. @@ -6823,11 +6835,13 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 -#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 -#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 - -#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 -#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 +#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 +#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 +#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b +#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f +#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 +#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec +#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 /* BCM84833 only */ #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a @@ -6838,26 +6852,35 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 #define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 #define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 -#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011 -#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012 +#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 +#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 +#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 +#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 +#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 +#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 +#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 /* Mailbox command set used by 84833. */ -#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2 +#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 +#define PHY84833_CMD_GET_EEE_MODE 0x8008 +#define PHY84833_CMD_SET_EEE_MODE 0x8009 /* Mailbox status set used by 84833. 
*/ -#define PHY84833_CMD_RECEIVED 0x0001 -#define PHY84833_CMD_IN_PROGRESS 0x0002 -#define PHY84833_CMD_COMPLETE_PASS 0x0004 -#define PHY84833_CMD_COMPLETE_ERROR 0x0008 -#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010 -#define PHY84833_CMD_SYSTEM_BOOT 0x0020 -#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040 -#define PHY84833_CMD_CLEAR_COMPLETE 0x0080 -#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5 - +#define PHY84833_STATUS_CMD_RECEIVED 0x0001 +#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010 +#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020 +#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 +#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 +#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 -/* 84833 F/W Feature Commands */ -#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27 -#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28 /* Warpcore clause 45 addressing */ #define MDIO_WC_DEVAD 0x3 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 14517691f8d..5ac616093f9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -30,6 +30,8 @@ #define BNX2X_MAX_EMUL_MULTI 16 +#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) + /**** Exe Queue interfaces ****/ /** @@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) return true; } +static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, + int n, u8 *buf) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + u8 *next = buf; + int counter = 0; + + /* traverse list */ + list_for_each_entry(pos, &o->head, link) { + if (counter < n) { + /* place leading zeroes in buffer */ + memset(next, 0, MAC_LEADING_ZERO_CNT); + + /* place mac after leading zeroes*/ + memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac, + ETH_ALEN); + + /* calculate address of next element and + * advance counter + */ + counter++; + next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); + + DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n", + counter, next, pos->u.mac.mac); + } + } + return counter * ETH_ALEN; +} + /* check_add() callbacks */ static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) @@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp, mac_obj->check_move = bnx2x_check_move; mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + mac_obj->get_n_elements = bnx2x_get_n_elements; /* Exe Queue */ bnx2x_exe_queue_init(bp, @@ -3342,7 +3375,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, if (!list_empty(&o->registry.exact_match.macs)) return 0; - elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); + elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC); if (!elem) { BNX2X_ERR("Failed to allocate registry memory\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 9a517c2e9f1..992308ff82e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj { /* RAMROD command to be used */ int ramrod_cmd; + /* copy first n elements onto preallocated buffer + * + * @param n number of elements to get + * @param buf buffer preallocated by caller into which elements + * will be copied. 
Note elements are 4-byte aligned + * so buffer size must be able to accomodate the + * aligned elements. + * + * @return number of copied bytes + */ + int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, + int n, u8 *buf); + /** * Checks if ADD-ramrod with the given params may be performed. * diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 02ac6a771bf..bc0121ac291 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref) #endif } +static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) +{ + u16 res = sizeof(struct host_port_stats) >> 2; + + /* if PFC stats are not supported by the MFW, don't DMA them */ + if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) + res -= (sizeof(u32)*4) >> 2; + + return res; +} + /* * Init service functions */ @@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp) DMAE_LEN32_RD_MAX * 4); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + DMAE_LEN32_RD_MAX * 4); - dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; + dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; @@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->len = bnx2x_get_port_stats_dma_len(bp); dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; @@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + + /* collect PFC stats */ + DIFF_64(diff.hi, new->tx_stat_gtpp_hi, + pstats->pfc_frames_tx_hi, + diff.lo, new->tx_stat_gtpp_lo, + pstats->pfc_frames_tx_lo); + pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; + pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; + ADD_64(pstats->pfc_frames_tx_hi, diff.hi, + pstats->pfc_frames_tx_lo, diff.lo); + + DIFF_64(diff.hi, new->rx_stat_grpp_hi, + pstats->pfc_frames_rx_hi, + diff.lo, new->rx_stat_grpp_lo, + pstats->pfc_frames_rx_lo); + pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; + pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; + ADD_64(pstats->pfc_frames_rx_hi, diff.hi, + pstats->pfc_frames_rx_lo, diff.lo); } estats->pause_frames_received_hi = @@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = + pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = + pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = + pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = + pstats->pfc_frames_tx_lo; } static void bnx2x_mstat_stats_update(struct bnx2x *bp) @@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp) ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); + /* collect pfc stats */ + ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, + pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); + 
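The PFC counters collected here are kept as split hi/lo 32-bit words, and the DIFF_64()/ADD_64() helpers used above propagate the carry between the two halves. A minimal standalone sketch of that carry handling follows; the function name is chosen here for illustration and is not the driver's macro.

#include <stdint.h>
#include <stdio.h>

/* Add a (a_hi:a_lo) quantity into (s_hi:s_lo), carrying from the low word
 * into the high word - the same idea as the driver's ADD_64(). */
static void add64_split(uint32_t *s_hi, uint32_t *s_lo,
			uint32_t a_hi, uint32_t a_lo)
{
	*s_lo += a_lo;
	if (*s_lo < a_lo)	/* unsigned wrap-around means a carry occurred */
		(*s_hi)++;
	*s_hi += a_hi;
}

int main(void)
{
	uint32_t hi = 0, lo = 0xfffffff0u;

	add64_split(&hi, &lo, 0, 0x20);		/* crosses the 32-bit boundary */
	printf("hi=0x%x lo=0x%x\n", hi, lo);	/* prints hi=0x1 lo=0x10 */
	return 0;
}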
ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, + pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); ADD_STAT64(stats_tx.tx_gt127, @@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp) pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = + pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = + pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = + pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = + pstats->pfc_frames_tx_lo; } static void bnx2x_emac_stats_update(struct bnx2x *bp) @@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; - pstats->host_port_stats_start = ++pstats->host_port_stats_end; + pstats->host_port_stats_counter++; if (!BP_NOMCP(bp)) { u32 nig_timer_max = @@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->len = bnx2x_get_port_stats_dma_len(bp); if (bp->func_stx) { dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; @@ -1349,12 +1403,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) enum bnx2x_stats_state state; if (unlikely(bp->panic)) return; - bnx2x_stats_stm[bp->stats_state][event].action(bp); + spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; spin_unlock_bh(&bp->stats_lock); + bnx2x_stats_stm[state][event].action(bp); + if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", state, event, bp->stats_state); @@ -1380,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->len = bnx2x_get_port_stats_dma_len(bp); dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; @@ -1457,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp) static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { int i; + int first_queue_query_index; struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; dma_addr_t cur_data_offset; @@ -1512,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); + /**** FCoE FW statistics data ****/ + if (!NO_FCOE(bp)) { + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, fcoe); + + cur_query_entry = + &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_FCOE; + /* For FCoE query index is a DONT CARE */ + cur_query_entry->index = BP_PORT(bp); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + } + /**** Clients' queries ****/ cur_data_offset = bp->fw_stats_data_mapping + 
offsetof(struct bnx2x_fw_stats_data, queue_stats); + /* first queue query index depends whether FCoE offloaded request will + * be included in the ramrod + */ + if (!NO_FCOE(bp)) + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX; + else + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1; + for_each_eth_queue(bp, i) { cur_query_entry = &bp->fw_stats_req-> - query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]; + query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); @@ -1531,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) cur_data_offset += sizeof(struct per_queue_stats); } + + /* add FCoE queue query if needed */ + if (!NO_FCOE(bp)) { + cur_query_entry = + &bp->fw_stats_req-> + query[first_queue_query_index + i]; + + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + } } void bnx2x_stats_init(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 5d8ce2f6afe..683deb05310 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -193,6 +193,12 @@ struct bnx2x_eth_stats { u32 total_tpa_aggregated_frames_lo; u32 total_tpa_bytes_hi; u32 total_tpa_bytes_lo; + + /* PFC */ + u32 pfc_frames_received_hi; + u32 pfc_frames_received_lo; + u32 pfc_frames_sent_hi; + u32 pfc_frames_sent_lo; }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 6f10c693983..dd3a0a232ea 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) return io->data; } +static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + + if (reg) + info.cmd = DRV_CTL_ULP_REGISTER_CMD; + else + info.cmd = DRV_CTL_ULP_UNREGISTER_CMD; + + info.data.ulp_type = ulp_type; + ethdev->drv_ctl(dev->netdev, &info); +} + static int cnic_in_use(struct cnic_sock *csk) { return test_bit(SK_F_INUSE, &csk->flags); @@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type) } read_unlock(&cnic_dev_lock); - rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); + RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL); mutex_unlock(&cnic_lock); synchronize_rcu(); @@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, mutex_unlock(&cnic_lock); + cnic_ulp_ctl(dev, ulp_type, true); + return 0; } @@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) } mutex_lock(&cnic_lock); if (rcu_dereference(cp->ulp_ops[ulp_type])) { - rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); + RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); cnic_put(dev); } else { pr_err("%s: device not registered to this ulp type %d\n", @@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); + cnic_ulp_ctl(dev, ulp_type, false); + return 0; } EXPORT_SYMBOL(cnic_unregister_driver); @@ -1342,7 +1361,7 
@@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, if (ret == 1) return 0; - return -EBUSY; + return ret; } static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, @@ -1830,7 +1849,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], done: cqes[0] = (struct kcqe *) &kcqe; cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - return ret; + return 0; } @@ -1928,7 +1947,7 @@ destroy_reply: cqes[0] = (struct kcqe *) &kcqe; cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - return ret; + return 0; } static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, @@ -2494,6 +2513,57 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) return ret; } +static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct kcqe kcqe; + struct kcqe *cqes[1]; + u32 cid; + u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); + u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK; + int ulp_type; + + cid = kwqe->kwqe_info0; + memset(&kcqe, 0, sizeof(kcqe)); + + if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) { + ulp_type = CNIC_ULP_ISCSI; + if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN) + cid = kwqe->kwqe_info1; + + kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT; + kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI; + kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR; + kcqe.kcqe_info2 = cid; + cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); + + } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) { + struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe; + u32 kcqe_op; + + ulp_type = CNIC_ULP_L4; + if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1) + kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE; + else if (opcode == L4_KWQE_OPCODE_VALUE_RESET) + kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP; + else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE) + kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + else + return; + + kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) | + KCQE_FLAGS_LAYER_MASK_L4; + l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR; + l4kcqe->cid = cid; + cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); + } else { + return; + } + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); +} + static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], u32 num_wqes) { @@ -2551,9 +2621,17 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, opcode); break; } - if (ret < 0) + if (ret < 0) { netdev_err(dev->netdev, "KWQE(0x%x) failed\n", opcode); + + /* Possibly bnx2x parity error, send completion + * to ulp drivers with error code to speed up + * cleanup and reset recovery. 
+ */ + if (ret == -EIO || ret == -EAGAIN) + cnic_bnx2x_kwqe_err(dev, kwqe); + } i += work; } return 0; @@ -3052,9 +3130,26 @@ static void cnic_ulp_start(struct cnic_dev *dev) } } +static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + int rc; + + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(ulp_type); + if (ulp_ops && ulp_ops->cnic_get_stats) + rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); + else + rc = -ENODEV; + mutex_unlock(&cnic_lock); + return rc; +} + static int cnic_ctl(void *data, struct cnic_ctl_info *info) { struct cnic_dev *dev = data; + int ulp_type = CNIC_ULP_ISCSI; switch (info->cmd) { case CNIC_CTL_STOP_CMD: @@ -3100,6 +3195,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) } break; } + case CNIC_CTL_FCOE_STATS_GET_CMD: + ulp_type = CNIC_ULP_FCOE; + /* fall through */ + case CNIC_CTL_ISCSI_STATS_GET_CMD: + cnic_hold(dev); + cnic_copy_ulp_stats(dev, ulp_type); + cnic_put(dev); + break; + default: return -EINVAL; } @@ -3475,7 +3579,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); - ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); + fl6.daddr = dst_addr->sin6_addr; if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) fl6.flowi6_oif = dst_addr->sin6_scope_id; @@ -3804,6 +3908,9 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) case L4_KCQE_OPCODE_VALUE_RESET_COMP: case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: + if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR) + set_bit(SK_F_HW_ERR, &csk->flags); + cp->close_conn(csk, opcode); break; @@ -3931,7 +4038,9 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: if (cnic_ready_to_close(csk, opcode)) { - if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + if (test_bit(SK_F_HW_ERR, &csk->flags)) + close_complete = 1; + else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; else close_complete = 1; @@ -4824,6 +4933,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) int func = CNIC_FUNC(cp), ret; u32 pfid; + dev->stats_addr = ethdev->addr_drv_info_to_mcp; cp->port_mode = CHIP_PORT_MODE_NONE; if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { @@ -5134,7 +5244,7 @@ static void cnic_stop_hw(struct cnic_dev *dev) } cnic_shutdown_rings(dev); clear_bit(CNIC_F_CNIC_UP, &dev->flags); - rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); + RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); synchronize_rcu(); cnic_cm_shutdown(dev); cp->stop_hw(dev); @@ -5288,6 +5398,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cdev->pcidev = pdev; cp->chip_id = ethdev->chip_id; + cdev->stats_addr = ethdev->addr_drv_info_to_mcp; + if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) cdev->max_iscsi_conn = ethdev->max_iscsi_conn; if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index 239de898f07..86936f6b6db 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -85,6 +85,7 @@ /* KCQ (kernel completion queue) completion status */ #define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) +#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4) #define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) #define 
L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 79443e0dbf9..1517763d4e5 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@ /* cnic_if.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2011 Broadcom Corporation + * Copyright (c) 2006-2012 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.5.7" -#define CNIC_MODULE_RELDATE "July 20, 2011" +#define CNIC_MODULE_VERSION "2.5.8" +#define CNIC_MODULE_RELDATE "Jan 3, 2012" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 @@ -86,6 +86,8 @@ struct kcqe { #define CNIC_CTL_START_CMD 2 #define CNIC_CTL_COMPLETION_CMD 3 #define CNIC_CTL_STOP_ISCSI_CMD 4 +#define CNIC_CTL_FCOE_STATS_GET_CMD 5 +#define CNIC_CTL_ISCSI_STATS_GET_CMD 6 #define DRV_CTL_IO_WR_CMD 0x101 #define DRV_CTL_IO_RD_CMD 0x102 @@ -96,6 +98,8 @@ struct kcqe { #define DRV_CTL_STOP_L2_CMD 0x107 #define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c #define DRV_CTL_ISCSI_STOPPED_CMD 0x10d +#define DRV_CTL_ULP_REGISTER_CMD 0x10e +#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f struct cnic_ctl_completion { u32 cid; @@ -133,6 +137,7 @@ struct drv_ctl_info { struct drv_ctl_spq_credit credit; struct drv_ctl_io io; struct drv_ctl_l2_ring ring; + int ulp_type; char bytes[MAX_DRV_CTL_DATA]; } data; }; @@ -201,6 +206,7 @@ struct cnic_eth_dev { struct kwqe_16 *[], u32); int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); unsigned long reserved1[2]; + union drv_info_to_mcp *addr_drv_info_to_mcp; }; struct cnic_sockaddr { @@ -255,6 +261,7 @@ struct cnic_sock { #define SK_F_CONNECT_START 4 #define SK_F_IPV6 5 #define SK_F_CLOSING 7 +#define SK_F_HW_ERR 8 atomic_t ref_count; u32 state; @@ -297,6 +304,8 @@ struct cnic_dev { int max_fcoe_conn; int max_rdma_conn; + union drv_info_to_mcp *stats_addr; + void *cnic_priv; }; @@ -326,6 +335,7 @@ struct cnic_ulp_ops { void (*cm_remote_abort)(struct cnic_sock *); int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type, char *data, u16 data_size); + int (*cnic_get_stats)(void *ulp_ctx); struct module *owner; atomic_t ref_count; }; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 0a1d7f279fc..8fa7abc53ec 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -163,7 +163,6 @@ enum sbmac_state { #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 -#define ETHER_ADDR_LEN 6 #define ENET_PACKET_SIZE 1518 /*#define ENET_PACKET_SIZE 9216 */ @@ -266,7 +265,7 @@ struct sbmac_softc { int sbm_pause; /* current pause setting */ int sbm_link; /* current link state */ - unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; + unsigned char sbm_hwaddr[ETH_ALEN]; struct sbmacdma sbm_txdma; /* only channel 0 for now */ struct sbmacdma sbm_rxdma; @@ -2676,15 +2675,4 @@ static struct platform_driver sbmac_driver = { }, }; -static int __init sbmac_init_module(void) -{ - return platform_driver_register(&sbmac_driver); -} - -static void __exit sbmac_cleanup_module(void) -{ - platform_driver_unregister(&sbmac_driver); -} - -module_init(sbmac_init_module); -module_exit(sbmac_cleanup_module); +module_platform_driver(sbmac_driver); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 
bf4074167d6..076e02a415a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 121 +#define TG3_MIN_NUM 122 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "November 2, 2011" +#define DRV_MODULE_RELDATE "December 7, 2011" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) #define TG3_DEF_RX_JUMBO_RING_PENDING 100 -#define TG3_RSS_INDIR_TBL_SIZE 128 /* Do not place this n-ring entries value into the tp struct itself, * we really want to expose these constants to GCC so that modulo et @@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #if (NET_IP_ALIGN != 0) #define TG3_RX_OFFSET(tp) ((tp)->rx_offset) #else -#define TG3_RX_OFFSET(tp) 0 +#define TG3_RX_OFFSET(tp) (NET_SKB_PAD) #endif /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) -#define TG3_TX_BD_DMA_MAX 4096 +#define TG3_TX_BD_DMA_MAX_2K 2048 +#define TG3_TX_BD_DMA_MAX_4K 4096 #define TG3_RAW_IP_ALIGN 2 @@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp) } } -static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) -{ - u16 miireg; - - if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) - miireg = ADVERTISE_PAUSE_CAP; - else if (flow_ctrl & FLOW_CTRL_TX) - miireg = ADVERTISE_PAUSE_ASYM; - else if (flow_ctrl & FLOW_CTRL_RX) - miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - else - miireg = 0; - - return miireg; -} - static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) { u16 miireg; @@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) { u8 cap = 0; - if (lcladv & ADVERTISE_1000XPAUSE) { - if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if (rmtadv & LPA_1000XPAUSE) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - else if (rmtadv & LPA_1000XPAUSE_ASYM) - cap = FLOW_CTRL_RX; - } else { - if (rmtadv & LPA_1000XPAUSE) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - } - } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { + if (lcladv & ADVERTISE_1000XPAUSE) + cap = FLOW_CTRL_RX; + if (rmtadv & ADVERTISE_1000XPAUSE) cap = FLOW_CTRL_TX; } @@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev) if (phydev->duplex == DUPLEX_HALF) mac_mode |= MAC_MODE_HALF_DUPLEX; else { - lcl_adv = tg3_advert_flowctrl_1000T( + lcl_adv = mii_advertise_flowctrl( tp->link_config.flowctrl); if (phydev->pause) @@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp) if (tp->link_config.active_speed == SPEED_1000 && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + tg3_flag(tp, 57765_CLASS)) && !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { val = MII_TG3_DSP_TAP26_ALNOKO | MII_TG3_DSP_TAP26_RMRXSTO; @@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) bool need_vaux = false; /* The 
GPIOs do something completely different on 57765. */ - if (!tg3_flag(tp, IS_NIC) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) return; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || @@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) u32 val, new_adv; new_adv = ADVERTISE_CSMA; - if (advertise & ADVERTISED_10baseT_Half) - new_adv |= ADVERTISE_10HALF; - if (advertise & ADVERTISED_10baseT_Full) - new_adv |= ADVERTISE_10FULL; - if (advertise & ADVERTISED_100baseT_Half) - new_adv |= ADVERTISE_100HALF; - if (advertise & ADVERTISED_100baseT_Full) - new_adv |= ADVERTISE_100FULL; - - new_adv |= tg3_advert_flowctrl_1000T(flowctrl); + new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; + new_adv |= mii_advertise_flowctrl(flowctrl); err = tg3_writephy(tp, MII_ADVERTISE, new_adv); if (err) goto done; - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - goto done; - - new_adv = 0; - if (advertise & ADVERTISED_1000baseT_Half) - new_adv |= ADVERTISE_1000HALF; - if (advertise & ADVERTISED_1000baseT_Full) - new_adv |= ADVERTISE_1000FULL; + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; - err = tg3_writephy(tp, MII_CTRL1000, new_adv); - if (err) - goto done; + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + } if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) goto done; @@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { case ASIC_REV_5717: case ASIC_REV_57765: + case ASIC_REV_57766: case ASIC_REV_5719: /* If we advertised any eee advertisements above... 
*/ if (val) @@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) return err; } -static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) +static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) { - u32 adv_reg, all_mask = 0; + u32 advmsk, tgtadv, advertising; - if (mask & ADVERTISED_10baseT_Half) - all_mask |= ADVERTISE_10HALF; - if (mask & ADVERTISED_10baseT_Full) - all_mask |= ADVERTISE_10FULL; - if (mask & ADVERTISED_100baseT_Half) - all_mask |= ADVERTISE_100HALF; - if (mask & ADVERTISED_100baseT_Full) - all_mask |= ADVERTISE_100FULL; + advertising = tp->link_config.advertising; + tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; - if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) - return 0; + advmsk = ADVERTISE_ALL; + if (tp->link_config.active_duplex == DUPLEX_FULL) { + tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); + advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + } - if ((adv_reg & ADVERTISE_ALL) != all_mask) - return 0; + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return false; + + if ((*lcladv & advmsk) != tgtadv) + return false; if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { u32 tg3_ctrl; - all_mask = 0; - if (mask & ADVERTISED_1000baseT_Half) - all_mask |= ADVERTISE_1000HALF; - if (mask & ADVERTISED_1000baseT_Full) - all_mask |= ADVERTISE_1000FULL; + tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) - return 0; + return false; tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); - if (tg3_ctrl != all_mask) - return 0; + if (tg3_ctrl != tgtadv) + return false; } - return 1; + return true; } -static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) +static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) { - u32 curadv, reqadv; + u32 lpeth = 0; - if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) - return 1; - - curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 val; - if (tp->link_config.active_duplex == DUPLEX_FULL) { - if (curadv != reqadv) - return 0; + if (tg3_readphy(tp, MII_STAT1000, &val)) + return false; - if (tg3_flag(tp, PAUSE_AUTONEG)) - tg3_readphy(tp, MII_LPA, rmtadv); - } else { - /* Reprogram the advertisement register, even if it - * does not affect the current link. If the link - * gets renegotiated in the future, we can save an - * additional renegotiation cycle by advertising - * it correctly in the first place. 
- */ - if (curadv != reqadv) { - *lcladv &= ~(ADVERTISE_PAUSE_CAP | - ADVERTISE_PAUSE_ASYM); - tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); - } + lpeth = mii_stat1000_to_ethtool_lpa_t(val); } - return 1; + if (tg3_readphy(tp, MII_LPA, rmtadv)) + return false; + + lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); + tp->link_config.rmt_adv = lpeth; + + return true; } static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) @@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) current_link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; + tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; + tp->link_config.rmt_adv = 0; if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { err = tg3_phy_auxctl_read(tp, @@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if (tp->link_config.autoneg == AUTONEG_ENABLE) { if ((bmcr & BMCR_ANENABLE) && - tg3_copper_is_advertising_all(tp, - tp->link_config.advertising)) { - if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, - &rmt_adv)) - current_link_up = 1; - } + tg3_phy_copper_an_config_ok(tp, &lcl_adv) && + tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) + current_link_up = 1; } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) } if (current_link_up == 1 && - tp->link_config.active_duplex == DUPLEX_FULL) + tp->link_config.active_duplex == DUPLEX_FULL) { + u32 reg, bit; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + reg = MII_TG3_FET_GEN_STAT; + bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; + } else { + reg = MII_TG3_EXT_STAT; + bit = MII_TG3_EXT_STAT_MDIX; + } + + if (!tg3_readphy(tp, reg, &val) && (val & bit)) + tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } } relink: @@ -4643,6 +4606,9 @@ restart_autoneg: if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; tp->serdes_counter = 0; @@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) if (rxflags & MR_LP_ADV_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; @@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) udelay(40); current_link_up = 0; + tp->link_config.rmt_adv = 0; mac_status = tr32(MAC_STATUS); if (tg3_flag(tp, HW_AUTONEG)) @@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; + tp->link_config.rmt_adv = 0; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); @@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 adv, new_adv; + u32 adv, newadv; err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | - ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM | - ADVERTISE_SLCT); + newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); - 
new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); - if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) - new_adv |= ADVERTISE_1000XHALF; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) - new_adv |= ADVERTISE_1000XFULL; - - if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { - tg3_writephy(tp, MII_ADVERTISE, new_adv); + if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, newadv); bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; tg3_writephy(tp, MII_BMCR, bmcr); @@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; + + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); } else if (!tg3_flag(tp, 5780_CLASS)) { /* Link is up via parallel detect */ } else { @@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi) u32 sw_idx = tnapi->tx_cons; struct netdev_queue *txq; int index = tnapi - tp->napi; + unsigned int pkts_compl = 0, bytes_compl = 0; if (tg3_flag(tp, ENABLE_TSS)) index--; @@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi) sw_idx = NEXT_TX(sw_idx); } + pkts_compl++; + bytes_compl += skb->len; + dev_kfree_skb(skb); if (unlikely(tx_bug)) { @@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi) } } + netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); + tnapi->tx_cons = sw_idx; /* Need to make the tx_cons update visible to tg3_start_xmit() @@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi) } } -static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) +static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) { - if (!ri->skb) + if (!ri->data) return; pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), map_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(ri->skb); - ri->skb = NULL; + kfree(ri->data); + ri->data = NULL; } /* Returns size of skb allocated or < 0 on error. @@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) * buffers the cpu only reads the last cacheline of the RX descriptor * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */ -static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, +static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, u32 opaque_key, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *desc; struct ring_info *map; - struct sk_buff *skb; + u8 *data; dma_addr_t mapping; - int skb_size, dest_idx; + int skb_size, data_size, dest_idx; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; desc = &tpr->rx_std[dest_idx]; map = &tpr->rx_std_buffers[dest_idx]; - skb_size = tp->rx_pkt_map_sz; + data_size = tp->rx_pkt_map_sz; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; desc = &tpr->rx_jmb[dest_idx].std; map = &tpr->rx_jmb_buffers[dest_idx]; - skb_size = TG3_RX_JMB_MAP_SZ; + data_size = TG3_RX_JMB_MAP_SZ; break; default: @@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. 
*/ - skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp)); - if (skb == NULL) + skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + data = kmalloc(skb_size, GFP_ATOMIC); + if (!data) return -ENOMEM; - skb_reserve(skb, TG3_RX_OFFSET(tp)); - - mapping = pci_map_single(tp->pdev, skb->data, skb_size, + mapping = pci_map_single(tp->pdev, + data + TG3_RX_OFFSET(tp), + data_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(tp->pdev, mapping)) { - dev_kfree_skb(skb); + kfree(data); return -EIO; } - map->skb = skb; + map->data = data; dma_unmap_addr_set(map, mapping, mapping); desc->addr_hi = ((u64)mapping >> 32); desc->addr_lo = ((u64)mapping & 0xffffffff); - return skb_size; + return data_size; } /* We only need to move over in the address because the other * members of the RX descriptor are invariant. See notes above - * tg3_alloc_rx_skb for full details. + * tg3_alloc_rx_data for full details. */ static void tg3_recycle_rx(struct tg3_napi *tnapi, struct tg3_rx_prodring_set *dpr, @@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, return; } - dest_map->skb = src_map->skb; + dest_map->data = src_map->data; dma_unmap_addr_set(dest_map, mapping, dma_unmap_addr(src_map, mapping)); dest_desc->addr_hi = src_desc->addr_hi; @@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, */ smp_wmb(); - src_map->skb = NULL; + src_map->data = NULL; } /* The RX ring scheme is composed of multiple rings which post fresh @@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) struct sk_buff *skb; dma_addr_t dma_addr; u32 opaque_key, desc_idx, *post_ptr; + u8 *data; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; dma_addr = dma_unmap_addr(ri, mapping); - skb = ri->skb; + data = ri->data; post_ptr = &std_prod_idx; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; dma_addr = dma_unmap_addr(ri, mapping); - skb = ri->skb; + data = ri->data; post_ptr = &jmb_prod_idx; } else goto next_pkt_nopost; @@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) goto next_pkt; } + prefetch(data + TG3_RX_OFFSET(tp)); len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - ETH_FCS_LEN; if (len > TG3_RX_COPY_THRESH(tp)) { int skb_size; - skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, + skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, *post_ptr); if (skb_size < 0) goto drop_it; @@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); - /* Ensure that the update to the skb happens + skb = build_skb(data); + if (!skb) { + kfree(data); + goto drop_it_no_recycle; + } + skb_reserve(skb, TG3_RX_OFFSET(tp)); + /* Ensure that the update to the data happens * after the usage of the old DMA mapping. 
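
[The allocation hunk above is the core of the build_skb() conversion: the RX ring now holds bare kmalloc() buffers, and an sk_buff is wrapped around the data only after the hardware has filled it. A rough sketch of the same pattern follows; the helper names and the pci_dev argument are hypothetical, and build_skb() is used in the single-argument form this tree provides.]

#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Illustrative only -- not part of the patch.  Allocation side: reserve
 * headroom plus the skb_shared_info block that build_skb() expects at the
 * end of the buffer, then DMA-map just the payload area.
 */
static u8 *example_alloc_rx_data(struct pci_dev *pdev, unsigned int data_size,
				 unsigned int headroom, dma_addr_t *mapping)
{
	unsigned int size = SKB_DATA_ALIGN(data_size + headroom) +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u8 *data = kmalloc(size, GFP_ATOMIC);

	if (!data)
		return NULL;

	*mapping = pci_map_single(pdev, data + headroom, data_size,
				  PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *mapping)) {
		kfree(data);
		return NULL;
	}
	return data;
}

/* Receive side: no copy -- the kmalloc()ed buffer becomes the skb head. */
static struct sk_buff *example_build_rx_skb(u8 *data, unsigned int headroom,
					    unsigned int len)
{
	struct sk_buff *skb = build_skb(data);

	if (!skb) {
		kfree(data);
		return NULL;
	}
	skb_reserve(skb, headroom);	/* skip the DMA headroom */
	skb_put(skb, len);		/* frame length from the RX descriptor */
	return skb;
}
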
*/ smp_wmb(); - ri->skb = NULL; + ri->data = NULL; - skb_put(skb, len); } else { - struct sk_buff *copy_skb; - tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); - copy_skb = netdev_alloc_skb(tp->dev, len + - TG3_RAW_IP_ALIGN); - if (copy_skb == NULL) + skb = netdev_alloc_skb(tp->dev, + len + TG3_RAW_IP_ALIGN); + if (skb == NULL) goto drop_it_no_recycle; - skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); - skb_put(copy_skb, len); + skb_reserve(skb, TG3_RAW_IP_ALIGN); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - skb_copy_from_linear_data(skb, copy_skb->data, len); + memcpy(skb->data, + data + TG3_RX_OFFSET(tp), + len); pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - - /* We'll reuse the original ring buffer. */ - skb = copy_skb; } + skb_put(skb, len); if ((tp->dev->features & NETIF_F_RXCSUM) && (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) @@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, di = dpr->rx_std_prod_idx; for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_std_buffers[i].skb) { + if (dpr->rx_std_buffers[i].data) { cpycnt = i - di; err = -ENOSPC; break; @@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, di = dpr->rx_jmb_prod_idx; for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_jmb_buffers[i].skb) { + if (dpr->rx_jmb_buffers[i].data) { cpycnt = i - di; err = -ENOSPC; break; @@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, bool hwbug = false; if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) - hwbug = 1; + hwbug = true; if (tg3_4g_overflow_test(map, len)) - hwbug = 1; + hwbug = true; if (tg3_40bit_overflow_test(tp, map, len)) - hwbug = 1; + hwbug = true; - if (tg3_flag(tp, 4K_FIFO_LIMIT)) { + if (tp->dma_limit) { u32 prvidx = *entry; u32 tmp_flag = flags & ~TXD_FLAG_END; - while (len > TG3_TX_BD_DMA_MAX && *budget) { - u32 frag_len = TG3_TX_BD_DMA_MAX; - len -= TG3_TX_BD_DMA_MAX; + while (len > tp->dma_limit && *budget) { + u32 frag_len = tp->dma_limit; + len -= tp->dma_limit; /* Avoid the 8byte DMA problem */ if (len <= 8) { - len += TG3_TX_BD_DMA_MAX / 2; - frag_len = TG3_TX_BD_DMA_MAX / 2; + len += tp->dma_limit / 2; + frag_len = tp->dma_limit / 2; } tnapi->tx_buffers[*entry].fragmented = true; @@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, *budget -= 1; *entry = NEXT_TX(*entry); } else { - hwbug = 1; + hwbug = true; tnapi->tx_buffers[prvidx].fragmented = false; } } @@ -6816,6 +6798,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } skb_tx_timestamp(skb); + netdev_sent_queue(tp->dev, skb->len); /* Packets are ready, update Tx producer idx local and on card. 
*/ tw32_tx_mbox(tnapi->prodmbox, entry); @@ -6968,7 +6951,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) return 0; } -static void tg3_set_loopback(struct net_device *dev, u32 features) +static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) { struct tg3 *tp = netdev_priv(dev); @@ -6994,7 +6977,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features) } } -static u32 tg3_fix_features(struct net_device *dev, u32 features) +static netdev_features_t tg3_fix_features(struct net_device *dev, + netdev_features_t features) { struct tg3 *tp = netdev_priv(dev); @@ -7004,9 +6988,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features) return features; } -static int tg3_set_features(struct net_device *dev, u32 features) +static int tg3_set_features(struct net_device *dev, netdev_features_t features) { - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) tg3_set_loopback(dev, features); @@ -7082,14 +7066,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp, if (tpr != &tp->napi[0].prodring) { for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; i = (i + 1) & tp->rx_std_ring_mask) - tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); if (tg3_flag(tp, JUMBO_CAPABLE)) { for (i = tpr->rx_jmb_cons_idx; i != tpr->rx_jmb_prod_idx; i = (i + 1) & tp->rx_jmb_ring_mask) { - tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); } } @@ -7098,12 +7082,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp, } for (i = 0; i <= tp->rx_std_ring_mask; i++) - tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { for (i = 0; i <= tp->rx_jmb_ring_mask; i++) - tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); } } @@ -7159,7 +7143,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { netdev_warn(tp->dev, "Using a smaller RX standard ring. Only " "%d out of %d buffers were allocated " @@ -7191,7 +7175,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, } for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { netdev_warn(tp->dev, "Using a smaller RX jumbo ring. Only %d " "out of %d buffers were allocated " @@ -7297,6 +7281,7 @@ static void tg3_free_rings(struct tg3 *tp) dev_kfree_skb_any(skb); } } + netdev_reset_queue(tp->dev); } /* Initialize tx/rx rings for packet processing. 
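
[The pkts_compl/bytes_compl counters and the netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added in the TX hunks above follow the standard byte queue limits (BQL) contract. A minimal sketch of how the three calls pair up; the driver-side details are hypothetical.]

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only -- not part of the patch.  BQL accounting: every byte
 * reported to netdev_sent_queue() must later be reported back through
 * netdev_completed_queue(); netdev_reset_queue() clears the in-flight
 * counters when the TX ring is torn down.
 */
static void example_tx_post(struct net_device *dev, struct sk_buff *skb)
{
	/* ... write descriptors for skb to the hardware ring ... */
	netdev_sent_queue(dev, skb->len);
}

static void example_tx_complete(struct net_device *dev,
				struct sk_buff **done, unsigned int n)
{
	unsigned int i, pkts_compl = 0, bytes_compl = 0;

	for (i = 0; i < n; i++) {
		pkts_compl++;
		bytes_compl += done[i]->len;
		dev_kfree_skb(done[i]);
	}
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void example_free_tx_ring(struct net_device *dev)
{
	/* ... unmap and free any pending skbs ... */
	netdev_reset_queue(dev);
}
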
@@ -7591,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) if (tnapi->hw_status) memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); } - if (tp->hw_stats) - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); return err; } @@ -7626,15 +7609,11 @@ static void tg3_restore_pci_state(struct tg3 *tp) pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { - if (tg3_flag(tp, PCI_EXPRESS)) - pcie_set_readrq(tp->pdev, tp->pcie_readrq); - else { - pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, - tp->pci_cacheline_sz); - pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, - tp->pci_lat_timer); - } + if (!tg3_flag(tp, PCI_EXPRESS)) { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); } /* Make sure PCI-X relaxed ordering bit is clear. */ @@ -7819,8 +7798,6 @@ static int tg3_chip_reset(struct tg3 *tp) pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, val16); - pcie_set_readrq(tp->pdev, tp->pcie_readrq); - /* Clear error status */ pci_write_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, @@ -7914,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp) return 0; } +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *, + struct tg3_ethtool_stats *); + /* tp->lock is held. */ static int tg3_halt(struct tg3 *tp, int kind, int silent) { @@ -7931,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) tg3_write_sig_legacy(tp, kind); tg3_write_sig_post_reset(tp, kind); + if (tp->hw_stats) { + /* Save the stats across chip resets... */ + tg3_get_stats64(tp->dev, &tp->net_stats_prev), + tg3_get_estats(tp, &tp->estats_prev); + + /* And make sure the next sample is new data */ + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + } + if (err) return err; @@ -8074,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; else if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + else if (tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; @@ -8091,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp) else if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; else limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; @@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) if (!tg3_flag(tp, 5750_PLUS) || tg3_flag(tp, 5780_CLASS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_flag(tp, 57765_PLUS)) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) @@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) return; - if (!tg3_flag(tp, 5705_PLUS)) - bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; - else - bdcache_maxcnt = 
TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); @@ -8231,6 +8220,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); } +static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp) +{ + int i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = + ethtool_rxfh_indir_default(i, tp->irq_cnt - 1); +} + +static void tg3_rss_check_indir_tbl(struct tg3 *tp) +{ + int i; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return; + + if (tp->irq_cnt <= 2) { + memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); + return; + } + + /* Validate table against current IRQ count */ + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { + if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1) + break; + } + + if (i != TG3_RSS_INDIR_TBL_SIZE) + tg3_rss_init_dflt_indir_tbl(tp); +} + +static void tg3_rss_write_indir_tbl(struct tg3 *tp) +{ + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + u32 val = tp->rss_ind_tbl[i]; + i++; + for (; i % 8; i++) { + val <<= 4; + val |= tp->rss_ind_tbl[i]; + } + tw32(reg, val); + reg += 4; + } +} + /* tp->lock is held. */ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) { @@ -8337,7 +8374,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_MODE, grc_mode); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tg3_flag(tp, 57765_CLASS)) { if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { u32 grc_mode = tr32(GRC_MODE); @@ -8425,7 +8462,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + if (!tg3_flag(tp, 57765_CLASS) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) val |= DMA_RWCTRL_TAGGED_STAT_WA; tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); @@ -8572,7 +8609,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, val | BDINFO_FLAGS_USE_EXT_RECV); if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { @@ -8581,10 +8618,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } if (tg3_flag(tp, 57765_PLUS)) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - val = TG3_RX_STD_MAX_SIZE_5700; - else - val = TG3_RX_STD_MAX_SIZE_5717; + val = TG3_RX_STD_RING_SIZE(tp); val <<= BDINFO_FLAGS_MAXLEN_SHIFT; val |= (TG3_RX_STD_DMA_SZ << 2); } else @@ -8661,6 +8695,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; + if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) @@ -8924,28 +8961,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(100); if (tg3_flag(tp, ENABLE_RSS)) { - int i = 0; - u32 reg = MAC_RSS_INDIR_TBL_0; - - if (tp->irq_cnt == 2) { - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { - tw32(reg, 0x0); - reg += 4; - } - } else { - u32 val; - - while (i < TG3_RSS_INDIR_TBL_SIZE) { - val = i % (tp->irq_cnt - 1); - i++; - for (; i % 8; i++) { - val <<= 4; - val |= (i % (tp->irq_cnt - 1)); - } - tw32(reg, val); - reg += 4; - } - } + 
tg3_rss_write_indir_tbl(tp); /* Setup the "secret" hash key. */ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); @@ -9002,7 +9018,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Prevent chip from dropping frames when flow control * is enabled. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tg3_flag(tp, 57765_CLASS)) val = 1; else val = 2; @@ -9217,7 +9233,7 @@ static void tg3_timer(unsigned long __opaque) spin_lock(&tp->lock); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) tg3_chk_missed_msi(tp); if (!tg3_flag(tp, TAGGED_STATUS)) { @@ -9669,6 +9685,8 @@ static int tg3_open(struct net_device *dev) */ tg3_ints_init(tp); + tg3_rss_check_indir_tbl(tp); + /* The placement of this call is tied * to the setup and use of Host TX descriptors. */ @@ -9700,8 +9718,8 @@ static int tg3_open(struct net_device *dev) tg3_free_rings(tp); } else { if (tg3_flag(tp, TAGGED_STATUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + !tg3_flag(tp, 57765_CLASS)) tp->timer_offset = HZ; else tp->timer_offset = HZ / 10; @@ -9782,10 +9800,6 @@ err_out1: return err; } -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, - struct rtnl_link_stats64 *); -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); - static int tg3_close(struct net_device *dev) { int i; @@ -9817,10 +9831,9 @@ static int tg3_close(struct net_device *dev) tg3_ints_fini(tp); - tg3_get_stats64(tp->dev, &tp->net_stats_prev); - - memcpy(&tp->estats_prev, tg3_get_estats(tp), - sizeof(tp->estats_prev)); + /* Clear stats across close / open calls */ + memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); + memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); tg3_napi_fini(tp); @@ -9868,9 +9881,9 @@ static u64 calc_crc_errors(struct tg3 *tp) estats->member = old_estats->member + \ get_stat64(&hw_stats->member) -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp, + struct tg3_ethtool_stats *estats) { - struct tg3_ethtool_stats *estats = &tp->estats; struct tg3_ethtool_stats *old_estats = &tp->estats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; @@ -10318,12 +10331,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->advertising |= ADVERTISED_Asym_Pause; } } - if (netif_running(dev)) { + if (netif_running(dev) && netif_carrier_ok(dev)) { ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); cmd->duplex = tp->link_config.active_duplex; + cmd->lp_advertising = tp->link_config.rmt_adv; + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) + cmd->eth_tp_mdix = ETH_TP_MDI_X; + else + cmd->eth_tp_mdix = ETH_TP_MDI; + } } else { ethtool_cmd_speed_set(cmd, SPEED_INVALID); cmd->duplex = DUPLEX_INVALID; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; } cmd->phy_address = tp->phy_addr; cmd->transceiver = XCVR_INTERNAL; @@ -10428,10 +10449,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct tg3 *tp = netdev_priv(dev); - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - strcpy(info->fw_version, tp->fw_ver); - strcpy(info->bus_info, pci_name(tp->pdev)); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, 
sizeof(info->version)); + strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); } static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -10590,12 +10611,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); - if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) + if (tp->link_config.flowctrl & FLOW_CTRL_RX) epause->rx_pause = 1; else epause->rx_pause = 0; - if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) + if (tp->link_config.flowctrl & FLOW_CTRL_TX) epause->tx_pause = 1; else epause->tx_pause = 0; @@ -10715,6 +10736,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset) } } +static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + if (netif_running(tp->dev)) + info->data = tp->irq_cnt; + else { + info->data = num_online_cpus(); + if (info->data > TG3_IRQ_MAX_VECS_RSS) + info->data = TG3_IRQ_MAX_VECS_RSS; + } + + /* The first interrupt vector only + * handles link interrupts. + */ + info->data -= 1; + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static u32 tg3_get_rxfh_indir_size(struct net_device *dev) +{ + u32 size = 0; + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, SUPPORT_MSIX)) + size = TG3_RSS_INDIR_TBL_SIZE; + + return size; +} + +static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) +{ + struct tg3 *tp = netdev_priv(dev); + int i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + indir[i] = tp->rss_ind_tbl[i]; + + return 0; +} + +static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) +{ + struct tg3 *tp = netdev_priv(dev); + size_t i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = indir[i]; + + if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) + return 0; + + /* It is legal to write the indirection + * table while the device is running. 
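
[The get/set_rxfh_indir ethtool hooks introduced above end up in tg3_rss_write_indir_tbl(), which packs eight 4-bit ring indices into each 32-bit MAC_RSS_INDIR_TBL register. The same packing, pulled out as a standalone sketch; the register-write callback and the EXAMPLE_* names are hypothetical.]

#include <linux/types.h>

#define EXAMPLE_INDIR_TBL_SIZE	128	/* matches TG3_RSS_INDIR_TBL_SIZE */

/* Illustrative only -- not part of the patch.  Pack the 128-entry RSS
 * indirection table four bits at a time, eight entries per 32-bit
 * register, as tg3_rss_write_indir_tbl() above does.
 */
static void example_write_indir_tbl(const u8 *tbl, u32 base_reg,
				    void (*write_reg)(u32 reg, u32 val))
{
	u32 reg = base_reg;
	int i = 0;

	while (i < EXAMPLE_INDIR_TBL_SIZE) {
		u32 val = tbl[i];

		i++;
		for (; i % 8; i++) {
			val <<= 4;	/* make room for the next entry */
			val |= tbl[i];	/* ring indices are always < 16 */
		}
		write_reg(reg, val);
		reg += 4;		/* registers are word-spaced */
	}
}
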
+ */ + tg3_full_lock(tp, 0); + tg3_rss_write_indir_tbl(tp); + tg3_full_unlock(tp); + + return 0; +} + static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { @@ -10769,7 +10862,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *tmp_stats) { struct tg3 *tp = netdev_priv(dev); - memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); + + tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); } static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) @@ -11352,7 +11446,7 @@ static int tg3_test_memory(struct tg3 *tp) if (tg3_flag(tp, 5717_PLUS)) mem_tbl = mem_tbl_5717; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + else if (tg3_flag(tp, 57765_CLASS)) mem_tbl = mem_tbl_57765; else if (tg3_flag(tp, 5755_PLUS)) mem_tbl = mem_tbl_5755; @@ -11400,8 +11494,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) u32 rx_start_idx, rx_idx, tx_idx, opaque_key; u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; u32 budget; - struct sk_buff *skb, *rx_skb; - u8 *tx_data; + struct sk_buff *skb; + u8 *tx_data, *rx_data; dma_addr_t map; int num_pkts, tx_len, rx_len, i, err; struct tg3_rx_buffer_desc *desc; @@ -11569,11 +11663,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) } if (opaque_key == RXD_OPAQUE_RING_STD) { - rx_skb = tpr->rx_std_buffers[desc_idx].skb; + rx_data = tpr->rx_std_buffers[desc_idx].data; map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; + rx_data = tpr->rx_jmb_buffers[desc_idx].data; map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], mapping); } else @@ -11582,15 +11676,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); + rx_data += TG3_RX_OFFSET(tp); for (i = data_off; i < rx_len; i++, val++) { - if (*(rx_skb->data + i) != (u8) (val & 0xff)) + if (*(rx_data + i) != (u8) (val & 0xff)) goto out; } } err = 0; - /* tg3_free_rings will unmap and free the rx_skb */ + /* tg3_free_rings will unmap and free the rx_data */ out: return err; } @@ -11943,6 +12038,10 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_coalesce = tg3_get_coalesce, .set_coalesce = tg3_set_coalesce, .get_sset_count = tg3_get_sset_count, + .get_rxnfc = tg3_get_rxnfc, + .get_rxfh_indir_size = tg3_get_rxfh_indir_size, + .get_rxfh_indir = tg3_get_rxfh_indir, + .set_rxfh_indir = tg3_set_rxfh_indir, }; static void __devinit tg3_get_eeprom_size(struct tg3 *tp) @@ -12612,7 +12711,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) tg3_get_57780_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) @@ -13218,8 +13317,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) static void __devinit tg3_phy_init_link_config(struct tg3 *tp) { - u32 adv = ADVERTISED_Autoneg | - ADVERTISED_Pause; + u32 adv = ADVERTISED_Autoneg; if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) adv |= ADVERTISED_1000baseT_Half | @@ -13322,7 +13420,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) if (!(tp->phy_flags & 
TG3_PHYFLG_ANY_SERDES) && !tg3_flag(tp, ENABLE_APE) && !tg3_flag(tp, ENABLE_ASF)) { - u32 bmsr, mask; + u32 bmsr, dummy; tg3_readphy(tp, MII_BMSR, &bmsr); if (!tg3_readphy(tp, MII_BMSR, &bmsr) && @@ -13335,10 +13433,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tg3_phy_set_wirespeed(tp); - mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - if (!tg3_copper_is_advertising_all(tp, mask)) { + if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, tp->link_config.flowctrl); @@ -13460,6 +13555,17 @@ out_no_vpd: strcpy(tp->board_part_number, "BCM57795"); else goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) + strcpy(tp->board_part_number, "BCM57762"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) + strcpy(tp->board_part_number, "BCM57766"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) + strcpy(tp->board_part_number, "BCM57782"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) + strcpy(tp->board_part_number, "BCM57786"); + else + goto nomatch; } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { strcpy(tp->board_part_number, "BCM95906"); } else { @@ -13798,7 +13904,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) pci_read_config_dword(tp->pdev, TG3PCI_GEN15_PRODID_ASICREV, &prod_id_asic_rev); @@ -13945,7 +14055,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, 5717_PLUS); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || - tg3_flag(tp, 5717_PLUS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + tg3_flag_set(tp, 57765_CLASS); + + if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS)) tg3_flag_set(tp, 57765_PLUS); /* Intentionally exclude ASIC_REV_5906 */ @@ -13997,9 +14110,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3) || - (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tp->fw_needed) { + /* For firmware TSO, assume ASF is disabled. + * We'll disable TSO later if we discover ASF + * is enabled in tg3_get_eeprom_hw_cfg(). 
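
[Stepping back to the ethtool changes a little earlier: tg3_get_settings() now reports the stored link-partner advertisement and the resolved MDI/MDI-X state, and marks the MDI-X field invalid while the link is down. A compact sketch of that reporting; the rmt_adv/mdix_on/link_up inputs stand in for the driver's own state and are hypothetical.]

#include <linux/ethtool.h>

/* Illustrative only -- not part of the patch.  Fill the link-partner and
 * MDI-X fields of ethtool_cmd the way the tg3_get_settings() hunk above
 * does.
 */
static void example_report_link(struct ethtool_cmd *cmd, u32 rmt_adv,
				bool mdix_on, bool link_up)
{
	if (link_up) {
		cmd->lp_advertising = rmt_adv;	/* ethtool ADVERTISED_* bits */
		cmd->eth_tp_mdix = mdix_on ? ETH_TP_MDI_X : ETH_TP_MDI;
	} else {
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
}
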
+ */ tg3_flag_set(tp, TSO_CAPABLE); - else { + } else { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; @@ -14027,6 +14144,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tg3_flag(tp, 57765_PLUS)) { tg3_flag_set(tp, SUPPORT_MSIX); tp->irq_max = TG3_IRQ_MAX_VECS; + tg3_rss_init_dflt_indir_tbl(tp); } } @@ -14034,9 +14152,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, SHORT_DMA_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tg3_flag_set(tp, 4K_FIFO_LIMIT); + tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; - if (tg3_flag(tp, 5717_PLUS)) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) tg3_flag_set(tp, LRG_PROD_RING_CAP); if (tg3_flag(tp, 57765_PLUS) && @@ -14056,12 +14178,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, PCI_EXPRESS); - tp->pcie_readrq = 4096; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - tp->pcie_readrq = 2048; - - pcie_set_readrq(tp->pdev, tp->pcie_readrq); + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) { + int readrq = pcie_get_readrq(tp->pdev); + if (readrq > 2048) + pcie_set_readrq(tp->pdev, 2048); + } pci_read_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, @@ -14273,6 +14394,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_get_eeprom_hw_cfg(tp); + if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. @@ -14311,7 +14438,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || @@ -14548,11 +14675,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else tg3_flag_clear(tp, POLL_SERDES); - tp->rx_offset = NET_IP_ALIGN; + tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && tg3_flag(tp, PCIX_MODE)) { - tp->rx_offset = 0; + tp->rx_offset = NET_SKB_PAD; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS tp->rx_copy_thresh = ~(u16)0; #endif @@ -15313,7 +15440,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; - u32 features = 0; + netdev_features_t features = 0; printk_once(KERN_INFO "%s\n", version); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 94b4bd049a3..aea8f72c24f 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -31,6 +31,8 @@ #define TG3_RX_RET_MAX_SIZE_5705 512 #define TG3_RX_RET_MAX_SIZE_5717 4096 +#define TG3_RSS_INDIR_TBL_SIZE 128 + /* First 256 bytes are a mirror of PCI config space. 
*/ #define TG3PCI_VENDOR 0x00000000 #define TG3PCI_VENDOR_BROADCOM 0x14e4 @@ -57,6 +59,10 @@ #define TG3PCI_DEVICE_TIGON3_57795 0x16b6 #define TG3PCI_DEVICE_TIGON3_5719 0x1657 #define TG3PCI_DEVICE_TIGON3_5720 0x165f +#define TG3PCI_DEVICE_TIGON3_57762 0x1682 +#define TG3PCI_DEVICE_TIGON3_57766 0x1686 +#define TG3PCI_DEVICE_TIGON3_57786 0x16b3 +#define TG3PCI_DEVICE_TIGON3_57782 0x16b7 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 @@ -168,6 +174,7 @@ #define ASIC_REV_57765 0x57785 #define ASIC_REV_5719 0x5719 #define ASIC_REV_5720 0x5720 +#define ASIC_REV_57766 0x57766 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -1340,6 +1347,7 @@ #define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 #define RDMAC_MODE_FIFO_SIZE_128 0x00020000 #define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 +#define RDMAC_MODE_JMB_2K_MMRR 0x00800000 #define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 #define RDMAC_MODE_IPV4_LSO_EN 0x08000000 #define RDMAC_MODE_IPV6_LSO_EN 0x10000000 @@ -2174,6 +2182,7 @@ #define MII_TG3_EXT_CTRL_TBI 0x8000 #define MII_TG3_EXT_STAT 0x11 /* Extended status register */ +#define MII_TG3_EXT_STAT_MDIX 0x2000 #define MII_TG3_EXT_STAT_LPASS 0x0100 #define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ @@ -2277,6 +2286,9 @@ #define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000 #define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800 +#define MII_TG3_FET_GEN_STAT 0x1c +#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000 + #define MII_TG3_FET_TEST 0x1f #define MII_TG3_FET_SHADOW_EN 0x0080 @@ -2662,9 +2674,13 @@ struct tg3_hw_stats { /* 'mapping' is superfluous as the chip does not write into * the tx/rx post rings so we could just fetch it from there. * But the cache behavior is better how we are doing it now. + * + * This driver uses new build_skb() API : + * RX ring buffer contains pointer to kmalloc() data only, + * skb are built only after Hardware filled the frame. */ struct ring_info { - struct sk_buff *skb; + u8 *data; DEFINE_DMA_UNMAP_ADDR(mapping); }; @@ -2690,6 +2706,7 @@ struct tg3_link_config { #define DUPLEX_INVALID 0xff #define AUTONEG_INVALID 0xff u16 active_speed; + u32 rmt_adv; /* When we go in and out of low power mode we need * to swap with this state. 
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS { TG3_FLAG_NVRAM_BUFFERED, TG3_FLAG_SUPPORT_MSI, TG3_FLAG_SUPPORT_MSIX, + TG3_FLAG_USING_MSI, + TG3_FLAG_USING_MSIX, TG3_FLAG_PCIX_MODE, TG3_FLAG_PCI_HIGH_SPEED, TG3_FLAG_PCI_32BIT, @@ -2880,7 +2899,6 @@ enum TG3_FLAGS { TG3_FLAG_CHIP_RESETTING, TG3_FLAG_INIT_COMPLETE, TG3_FLAG_TSO_BUG, - TG3_FLAG_IS_5788, TG3_FLAG_MAX_RXPEND_64, TG3_FLAG_TSO_CAPABLE, TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ @@ -2889,14 +2907,9 @@ enum TG3_FLAGS { TG3_FLAG_IS_NIC, TG3_FLAG_FLASH, TG3_FLAG_HW_TSO_1, - TG3_FLAG_5705_PLUS, - TG3_FLAG_5750_PLUS, + TG3_FLAG_HW_TSO_2, TG3_FLAG_HW_TSO_3, - TG3_FLAG_USING_MSI, - TG3_FLAG_USING_MSIX, TG3_FLAG_ICH_WORKAROUND, - TG3_FLAG_5780_CLASS, - TG3_FLAG_HW_TSO_2, TG3_FLAG_1SHOT_MSI, TG3_FLAG_NO_FWARE_REPORTED, TG3_FLAG_NO_NVRAM_ADDR_TRANS, @@ -2910,18 +2923,23 @@ enum TG3_FLAGS { TG3_FLAG_RGMII_EXT_IBND_RX_EN, TG3_FLAG_RGMII_EXT_IBND_TX_EN, TG3_FLAG_CLKREQ_BUG, - TG3_FLAG_5755_PLUS, TG3_FLAG_NO_NVRAM, TG3_FLAG_ENABLE_RSS, TG3_FLAG_ENABLE_TSS, TG3_FLAG_SHORT_DMA_BUG, TG3_FLAG_USE_JUMBO_BDFLAG, TG3_FLAG_L1PLLPD_EN, - TG3_FLAG_57765_PLUS, TG3_FLAG_APE_HAS_NCSI, - TG3_FLAG_5717_PLUS, TG3_FLAG_4K_FIFO_LIMIT, TG3_FLAG_RESET_TASK_PENDING, + TG3_FLAG_5705_PLUS, + TG3_FLAG_IS_5788, + TG3_FLAG_5750_PLUS, + TG3_FLAG_5780_CLASS, + TG3_FLAG_5755_PLUS, + TG3_FLAG_57765_PLUS, + TG3_FLAG_57765_CLASS, + TG3_FLAG_5717_PLUS, /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ @@ -2985,6 +3003,7 @@ struct tg3 { /* begin "tx thread" cacheline section */ void (*write32_tx_mbox) (struct tg3 *, u32, u32); + u32 dma_limit; /* begin "rx thread" cacheline section */ struct tg3_napi napi[TG3_IRQ_MAX_VECS]; @@ -3005,7 +3024,6 @@ struct tg3 { unsigned long rx_dropped; unsigned long tx_dropped; struct rtnl_link_stats64 net_stats_prev; - struct tg3_ethtool_stats estats; struct tg3_ethtool_stats estats_prev; DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); @@ -3131,10 +3149,12 @@ struct tg3 { #define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 #define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 #define TG3_PHYFLG_EEE_CAP 0x00040000 +#define TG3_PHYFLG_MDIX_STATE 0x00200000 u32 led_ctrl; u32 phy_otp; u32 setlpicnt; + u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE]; #define TG3_BPN_SIZE 24 char board_part_number[TG3_BPN_SIZE]; diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile index 74d3abca196..6027302ae73 100644 --- a/drivers/net/ethernet/brocade/bna/Makefile +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_BNA) += bna.o -bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o +bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o bna-objs += cna_fwimg.o diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 8e627186507..29f284f79e0 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -185,6 +185,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) } /** + * bfa_cee_get_attr() + * + * @brief Send the request to the f/w to fetch CEE attributes. + * + * @param[in] Pointer to the CEE module data structure. 
+ * + * @return Status + */ +enum bfa_status +bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req *cmd; + + BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); + if (!bfa_nw_ioc_is_operational(cee->ioc)) + return BFA_STATUS_IOC_FAILURE; + + if (cee->get_attr_pending == true) + return BFA_STATUS_DEVBUSY; + + cee->get_attr_pending = true; + cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg; + cee->attr = attr; + cee->cbfn.get_attr_cbfn = cbfn; + cee->cbfn.get_attr_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); + bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL); + + return BFA_STATUS_OK; +} + +/** * bfa_cee_isrs() * * @brief Handles Mail-box interrupts for CEE module. diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h index 58d54e98d59..93fde633d6f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void); void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa); void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); - +enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee, + struct bfa_cee_attr *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); #endif /* __BFA_CEE_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index 2f12d68021d..871c6309334 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -219,41 +219,39 @@ enum { * All numerical fields are in big-endian format. 
*/ struct bfa_mfg_block { - u8 version; /*!< manufacturing block version */ - u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */ - u16 mfgsize; /*!< mfg block size */ - u16 u16_chksum; /*!< old u16 checksum */ - char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; - char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; - u8 mfg_day; /*!< manufacturing day */ - u8 mfg_month; /*!< manufacturing month */ - u16 mfg_year; /*!< manufacturing year */ - u64 mfg_wwn; /*!< wwn base for this adapter */ - u8 num_wwn; /*!< number of wwns assigned */ - u8 mfg_speeds; /*!< speeds allowed for this adapter */ - u8 rsv[2]; - char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; - char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; - char - supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; - char - supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; - mac_t mfg_mac; /*!< mac address */ - u8 num_mac; /*!< number of mac addresses */ - u8 rsv2; - u32 card_type; /*!< card type */ - char cap_nic; /*!< capability nic */ - char cap_cna; /*!< capability cna */ - char cap_hba; /*!< capability hba */ - char cap_fc16g; /*!< capability fc 16g */ - char cap_sriov; /*!< capability sriov */ - char cap_mezz; /*!< capability mezz */ - u8 rsv3; - u8 mfg_nports; /*!< number of ports */ - char media[8]; /*!< xfi/xaui */ - char initial_mode[8];/*!< initial mode: hba/cna/nic */ - u8 rsv4[84]; - u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */ + u8 version; /* manufacturing block version */ + u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ + u16 mfgsize; /* mfg block size */ + u16 u16_chksum; /* old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + u64 mfg_wwn; /* wwn base for this adapter */ + u8 num_wwn; /* number of wwns assigned */ + u8 mfg_speeds; /* speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /* base mac address */ + u8 num_mac; /* number of mac addresses */ + u8 rsv2; + u32 card_type; /* card type */ + char cap_nic; /* capability nic */ + char cap_cna; /* capability cna */ + char cap_hba; /* capability hba */ + char cap_fc16g; /* capability fc 16g */ + char cap_sriov; /* capability sriov */ + char cap_mezz; /* capability mezz */ + u8 rsv3; + u8 mfg_nports; /* number of ports */ + char media[8]; /* xfi/xaui */ + char initial_mode[8]; /* initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ }; #pragma pack() @@ -293,4 +291,34 @@ enum bfa_mode { BFA_MODE_NIC = 3 }; +/* + * Flash module specific + */ +#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ +#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ +#define BFA_TOTAL_FLASH_SIZE 0x400000 +#define BFA_FLASH_PART_MFG 7 + +/* + * flash partition attributes + */ +struct bfa_flash_part_attr { + u32 part_type; /* partition type */ + u32 part_instance; /* partition instance */ + u32 part_off; /* partition offset */ + u32 part_size; /* partition size */ + u32 part_len; /* partition content length */ + u32 part_status; /* partition status */ + char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24]; +}; + +/* + * flash attributes + */ +struct bfa_flash_attr { 
+ u32 status; /* flash overall status */ + u32 npart; /* num of partitions */ + struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX]; +}; + #endif /* __BFA_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b0307a00a10..abfad275b5f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); +static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc); static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); @@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) { + bfa_nw_ioc_debug_save_ftrc(iocpf->ioc); bfa_ioc_hw_sem_get(iocpf->ioc); } @@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc) bfa_q_deq(&mod->cmd_q, &cmd); } +/** + * Read data from SMEM to host through PCI memmap + * + * @param[in] ioc memory for IOC + * @param[in] tbuf app memory to store data from smem + * @param[in] soff smem offset + * @param[in] sz size of smem in bytes + */ +static int +bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) +{ + u32 pgnum, loff, r32; + int i, len; + u32 *buf = tbuf; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); + loff = PSS_SMEM_PGOFF(soff); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0) + return 1; + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + len = sz/sizeof(u32); + for (i = 0; i < len; i++) { + r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start))); + buf[i] = be32_to_cpu(r32); + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + } + } + + writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * release semaphore + */ + readl(ioc->ioc_regs.ioc_init_sem_reg); + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return 0; +} + +/** + * Retrieve saved firmware trace from a prior IOC failure. + */ +int +bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) +{ + u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; + int tlen, status = 0; + + tlen = *trclen; + if (tlen > BNA_DBG_FWTRC_LEN) + tlen = BNA_DBG_FWTRC_LEN; + + status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); + *trclen = tlen; + return status; +} + +/** + * Save firmware trace if configured. + */ +static void +bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) +{ + int tlen; + + if (ioc->dbg_fwsave_once) { + ioc->dbg_fwsave_once = 0; + if (ioc->dbg_fwsave_len) { + tlen = ioc->dbg_fwsave_len; + bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); + } + } +} + +/** + * Retrieve saved firmware trace from a prior IOC failure. 
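
[The SMEM reader added above (bfa_nw_ioc_smem_read) streams words out of adapter memory through a paged PCI window, re-selecting the page whenever the offset wraps. The wrap handling, reduced to a sketch: the window size, macro names, and register arguments are hypothetical stand-ins for the PSS_SMEM_* machinery, and the driver's endianness fix-ups are left out.]

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_PAGE_SZ		0x8000U
#define EXAMPLE_PGOFF(off)	((off) & (EXAMPLE_PAGE_SZ - 1))

/* Illustrative only -- not part of the patch.  Read 'words' 32-bit values
 * starting at page 'pgnum', offset 'loff', advancing the page-select
 * register each time the offset wraps around the window.
 */
static void example_paged_read(void __iomem *win, void __iomem *page_sel,
			       u32 pgnum, u32 loff, u32 *buf, u32 words)
{
	u32 i;

	writel(pgnum, page_sel);		/* select the first page */

	for (i = 0; i < words; i++) {
		buf[i] = readl(win + loff);
		loff = EXAMPLE_PGOFF(loff + sizeof(u32));
		if (loff == 0)			/* wrapped: move to next page */
			writel(++pgnum, page_sel);
	}
}
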
+ */ +int +bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) +{ + int tlen; + + if (ioc->dbg_fwsave_len == 0) + return BFA_STATUS_ENOFSAVE; + + tlen = *trclen; + if (tlen > ioc->dbg_fwsave_len) + tlen = ioc->dbg_fwsave_len; + + memcpy(trcdata, ioc->dbg_fwsave, tlen); + *trclen = tlen; + return BFA_STATUS_OK; +} + static void bfa_ioc_fail_notify(struct bfa_ioc *ioc) { @@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc) */ ioc->cbfn->hbfail_cbfn(ioc->bfa); bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); + bfa_nw_ioc_debug_save_ftrc(ioc); } /** @@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_DISABLE); } +/** + * Initialize memory for saving firmware trace. + */ +void +bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) +{ + ioc->dbg_fwsave = dbg_fwsave; + ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0; +} + static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) { @@ -2172,6 +2293,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) } /** + * return true if IOC is operational + */ +bool +bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); +} + +/** * Add to IOC heartbeat failure notification queue. To be used by common * modules such as cee, port, diag. */ @@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) msecs_to_jiffies(BFA_IOC_POLL_TOV)); } } + +/* + * Flash module specific + */ + +/* + * FLASH DMA buffer should be big enough to hold both MFG block and + * asic block(64k) at the same time and also should be 2k aligned to + * avoid write segement to cross sector boundary. + */ +#define BFA_FLASH_SEG_SZ 2048 +#define BFA_FLASH_DMA_BUF_SZ \ + roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ) + +static void +bfa_flash_cb(struct bfa_flash *flash) +{ + flash->op_busy = 0; + if (flash->cbfn) + flash->cbfn(flash->cbarg, flash->status); +} + +static void +bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) +{ + struct bfa_flash *flash = cbarg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (flash->op_busy) { + flash->status = BFA_STATUS_IOC_FAILURE; + flash->cbfn(flash->cbarg, flash->status); + flash->op_busy = 0; + } + break; + default: + break; + } +} + +/* + * Send flash write request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_write_send(struct bfa_flash *flash) +{ + struct bfi_flash_write_req *msg = + (struct bfi_flash_write_req *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == flash->residue) ? 1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); + + flash->residue -= len; + flash->offset += len; +} + +/* + * Send flash read request. 
+ * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_read_send(void *cbarg) +{ + struct bfa_flash *flash = cbarg; + struct bfi_flash_read_req *msg = + (struct bfi_flash_read_req *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); +} + +/* + * Process flash response messages upon receiving interrupts. + * + * @param[in] flasharg - flash structure + * @param[in] msg - message structure + */ +static void +bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) +{ + struct bfa_flash *flash = flasharg; + u32 status; + + union { + struct bfi_flash_query_rsp *query; + struct bfi_flash_write_rsp *write; + struct bfi_flash_read_rsp *read; + struct bfi_mbmsg *msg; + } m; + + m.msg = msg; + + /* receiving response after ioc failure */ + if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) + return; + + switch (msg->mh.msg_id) { + case BFI_FLASH_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + if (status == BFA_STATUS_OK) { + u32 i; + struct bfa_flash_attr *attr, *f; + + attr = (struct bfa_flash_attr *) flash->ubuf; + f = (struct bfa_flash_attr *) flash->dbuf_kva; + attr->status = be32_to_cpu(f->status); + attr->npart = be32_to_cpu(f->npart); + for (i = 0; i < attr->npart; i++) { + attr->part[i].part_type = + be32_to_cpu(f->part[i].part_type); + attr->part[i].part_instance = + be32_to_cpu(f->part[i].part_instance); + attr->part[i].part_off = + be32_to_cpu(f->part[i].part_off); + attr->part[i].part_size = + be32_to_cpu(f->part[i].part_size); + attr->part[i].part_len = + be32_to_cpu(f->part[i].part_len); + attr->part[i].part_status = + be32_to_cpu(f->part[i].part_status); + } + } + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + if (status != BFA_STATUS_OK || flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_write_send(flash); + break; + case BFI_FLASH_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + if (status != BFA_STATUS_OK) { + flash->status = status; + bfa_flash_cb(flash); + } else { + u32 len = be32_to_cpu(m.read->length); + memcpy(flash->ubuf + flash->offset, + flash->dbuf_kva, len); + flash->residue -= len; + flash->offset += len; + if (flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_read_send(flash); + } + break; + case BFI_FLASH_I2H_BOOT_VER_RSP: + case BFI_FLASH_I2H_EVENT: + break; + default: + WARN_ON(1); + } +} + +/* + * Flash memory info API. + */ +u32 +bfa_nw_flash_meminfo(void) +{ + return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. 
+ * + * @param[in] flash - flash structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + */ +void +bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) +{ + flash->ioc = ioc; + flash->cbfn = NULL; + flash->cbarg = NULL; + flash->op_busy = 0; + + bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); + bfa_q_qe_init(&flash->ioc_notify); + bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); + list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); +} + +/* + * Claim memory for flash + * + * @param[in] flash - flash structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - physical memory address + */ +void +bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) +{ + flash->dbuf_kva = dm_kva; + flash->dbuf_pa = dm_pa; + memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); + dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Get flash attribute. + * + * @param[in] flash - flash structure + * @param[in] attr - flash attribute structure + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, + bfa_cb_flash cbfn, void *cbarg) +{ + struct bfi_flash_query_req *msg = + (struct bfi_flash_query_req *) flash->mb.msg; + + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->ubuf = (u8 *) attr; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); + + return BFA_STATUS_OK; +} + +/* + * Update flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to the partition starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg) +{ + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + */ + if (!len || (len & 0x03)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (type == BFA_FLASH_PART_MFG) + return BFA_STATUS_EINVAL; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_write_send(flash); + + return BFA_STATUS_OK; +} + +/* + * Read flash partition. 
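
[Both the write path (bfa_flash_write_send) and the read path above move data in DMA-buffer-sized chunks: each mailbox request carries at most BFA_FLASH_DMA_BUF_SZ bytes, and the response handler advances the offset, shrinks the residue, and posts the next request until the residue reaches zero. The bookkeeping, as a minimal sketch; the struct and function names are hypothetical.]

#include <linux/types.h>

/* Illustrative only -- not part of the patch.  Chunked-transfer state as
 * used by the flash read/write requests above.
 */
struct example_xfer {
	u8	*ubuf;		/* caller's whole buffer */
	u32	offset;		/* bytes transferred so far */
	u32	residue;	/* bytes still outstanding */
};

/* Size of the next request: never more than one DMA buffer. */
static u32 example_next_chunk_len(const struct example_xfer *x, u32 dma_buf_sz)
{
	return x->residue < dma_buf_sz ? x->residue : dma_buf_sz;
}

/* Called from the response handler; returns true when the transfer is done,
 * otherwise the caller posts another request of example_next_chunk_len() bytes.
 */
static bool example_chunk_complete(struct example_xfer *x, u32 len)
{
	x->offset += len;
	x->residue -= len;
	return x->residue == 0;
}
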
+ * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to the partition starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg) +{ + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + */ + if (!len || (len & 0x03)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_read_send(flash); + + return BFA_STATUS_OK; +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index ca158d1eaef..3b4460fdc14 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -27,6 +27,8 @@ #define BFA_IOC_HWSEM_TOV 500 /* msecs */ #define BFA_IOC_HB_TOV 500 /* msecs */ #define BFA_IOC_POLL_TOV 200 /* msecs */ +#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ + BFI_IOC_TRC_HDR_SZ) /** * PCI device information required by IOC @@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa)); } +#define bfa_alen_set(__alen, __len, __pa) \ + __bfa_alen_set(__alen, __len, (u64)__pa) + +static inline void +__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa) +{ + alen->al_len = cpu_to_be32(len); + bfa_dma_be_addr_set(alen->al_addr, pa); +} + struct bfa_ioc_regs { void __iomem *hfn_mbox_cmd; void __iomem *hfn_mbox; @@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc); void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); +bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, struct bfa_ioc_notify *notify); @@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr); mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); +void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave); +int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen); +int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen); /* * Timeout APIs @@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc); u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off); u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen); +/* + * Flash module specific + */ +typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status); + +struct bfa_flash { + struct bfa_ioc *ioc; /* back pointer to ioc */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 op_busy; /* operation busy flag */ + u32 residue; /* residual length */ + u32 offset; /* offset */ + enum bfa_status status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical 
address */ + bfa_cb_flash cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + u32 addr_off; /* partition address offset */ + struct bfa_mbox_cmd mb; /* mailbox */ + struct bfa_ioc_notify ioc_notify; /* ioc event notify */ +}; + +enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash, + struct bfa_flash_attr *attr, + bfa_cb_flash cbfn, void *cbarg); +enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash, + u32 type, u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg); +enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash, + u32 type, u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg); +u32 bfa_nw_flash_meminfo(void); +void bfa_nw_flash_attach(struct bfa_flash *flash, + struct bfa_ioc *ioc, void *dev); +void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa); + #endif /* __BFA_IOC_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 7a1393aabd4..0d9df695397 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -83,6 +83,14 @@ union bfi_addr_u { } a32; }; +/** + * Generic DMA addr-len pair. + */ +struct bfi_alen { + union bfi_addr_u al_addr; /* DMA addr of buffer */ + u32 al_len; /* length of buffer */ +}; + /* * Large Message structure - 128 Bytes size Msgs */ @@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply { */ #define BFI_IOC_TRC_OFF (0x4b00) #define BFI_IOC_TRC_ENTS 256 +#define BFI_IOC_TRC_ENT_SZ 16 +#define BFI_IOC_TRC_HDR_SZ 32 #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) #define BFI_IOC_MD5SUM_SZ 4 @@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req { u16 len; }; +/* + * FLASH module specific + */ +enum bfi_flash_h2i_msgs { + BFI_FLASH_H2I_QUERY_REQ = 1, + BFI_FLASH_H2I_ERASE_REQ = 2, + BFI_FLASH_H2I_WRITE_REQ = 3, + BFI_FLASH_H2I_READ_REQ = 4, + BFI_FLASH_H2I_BOOT_VER_REQ = 5, +}; + +enum bfi_flash_i2h_msgs { + BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2), + BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4), + BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5), + BFI_FLASH_I2H_EVENT = BFA_I2HM(127), +}; + +/* + * Flash query request + */ +struct bfi_flash_query_req { + struct bfi_mhdr mh; /* Common msg header */ + struct bfi_alen alen; +}; + +/* + * Flash write request + */ +struct bfi_flash_write_req { + struct bfi_mhdr mh; /* Common msg header */ + struct bfi_alen alen; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; +}; + +/* + * Flash read request + */ +struct bfi_flash_read_req { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen alen; +}; + +/* + * Flash query response + */ +struct bfi_flash_query_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 status; +}; + +/* + * Flash read response + */ +struct bfi_flash_read_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +/* + * Flash write response + */ +struct bfi_flash_write_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + #pragma pack() #endif /* __BFI_H__ */ diff --git 
a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 26f5c5abfd1..9ccc586e376 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma); kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; + bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva); /** * Attach common modules (Diag, SFP, CEE, Port) and claim respective @@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, kva += bfa_nw_cee_meminfo(); dma += bfa_nw_cee_meminfo(); + bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna); + bfa_nw_flash_memclaim(&bna->flash, kva, dma); + kva += bfa_nw_flash_meminfo(); + dma += bfa_nw_flash_meminfo(); + bfa_msgq_attach(&bna->msgq, &ioceth->ioc); bfa_msgq_memclaim(&bna->msgq, kva, dma); bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna); @@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info) res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( (bfa_nw_cee_meminfo() + - bfa_msgq_meminfo()), PAGE_SIZE); + bfa_nw_flash_meminfo() + + bfa_msgq_meminfo()), PAGE_SIZE); /* DMA memory for retrieving IOC attributes */ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; @@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info) /* Virtual memory for retreiving fw_trc */ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; - res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0; - res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN; /* DMA memory for retreiving stats */ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index d090fbfb12f..e8d3ab7ea6c 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -427,7 +427,7 @@ struct bna_ethport { /* Doorbell structure */ struct bna_ib_dbell { - void *__iomem doorbell_addr; + void __iomem *doorbell_addr; u32 doorbell_ack; }; @@ -463,7 +463,7 @@ struct bna_tcb { u32 consumer_index; volatile u32 *hw_consumer_index; u32 q_depth; - void *__iomem q_dbell; + void __iomem *q_dbell; struct bna_ib_dbell *i_dbell; int page_idx; int page_count; @@ -599,7 +599,7 @@ struct bna_rcb { u32 producer_index; u32 consumer_index; u32 q_depth; - void *__iomem q_dbell; + void __iomem *q_dbell; int page_idx; int page_count; /* Control path */ @@ -966,6 +966,7 @@ struct bna { struct bna_ioceth ioceth; struct bfa_cee cee; + struct bfa_flash flash; struct bfa_msgq msgq; struct bna_ethport ethport; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7f3091e7eb4..be7d91e4b78 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1; module_param(bnad_ioc_auto_recover, uint, 0444); MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); +static uint bna_debugfs_enable = 1; +module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," + " Range[false:0|true:1]"); 
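A note on the carve-out wired up in bna_ioceth_init() above: each common module (CEE, flash, msgq) is attached, claims its slice of the single BNA_RES_MEM_T_COM block, and then the kva/dma cursors advance by that module's meminfo so the next module starts on its own aligned slice; bna_res_req() sizes the whole block as the page-aligned sum of those meminfo values. Below is a minimal sketch of the flash module's contribution, assuming bfa_nw_flash_meminfo() (whose body is not part of the hunks shown here) simply reports the DMA buffer size rounded to the alignment boundary, i.e. the same roundup that bfa_nw_flash_memclaim() applies when it advances dm_kva/dm_pa.

/* Sketch only, not the verbatim driver code: the flash module's share of
 * the common DMA block. BFA_FLASH_DMA_BUF_SZ and BFA_DMA_ALIGN_SZ are the
 * constants already used by bfa_nw_flash_memclaim() in this patch.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

If meminfo is defined this way, the total computed in bna_res_req() (CEE + flash + msgq meminfo, rounded up to PAGE_SIZE) stays consistent with the per-module kva/dma cursor advances performed in bna_ioceth_init().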
+ /* * Global variables */ u32 bnad_rxqs_per_cq = 2; - +static u32 bna_id; +static struct mutex bnad_list_mutex; +static LIST_HEAD(bnad_list); static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /* @@ -75,6 +82,23 @@ do { \ #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */ +static void +bnad_add_to_list(struct bnad *bnad) +{ + mutex_lock(&bnad_list_mutex); + list_add_tail(&bnad->list_entry, &bnad_list); + bnad->id = bna_id++; + mutex_unlock(&bnad_list_mutex); +} + +static void +bnad_remove_from_list(struct bnad *bnad) +{ + mutex_lock(&bnad_list_mutex); + list_del(&bnad->list_entry); + mutex_unlock(&bnad_list_mutex); +} + /* * Reinitialize completions in CQ, once Rx is taken down */ @@ -723,7 +747,7 @@ void bnad_cb_ethport_link_status(struct bnad *bnad, enum bna_link_status link_status) { - bool link_up = 0; + bool link_up = false; link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); @@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad) complete(&bnad->bnad_completions.mtu_comp); } +void +bnad_cb_completion(void *arg, enum bfa_status status) +{ + struct bnad_iocmd_comp *iocmd_comp = + (struct bnad_iocmd_comp *)arg; + + iocmd_comp->comp_status = (u32) status; + complete(&iocmd_comp->comp); +} + /* Resource allocation, free functions */ static void @@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu) return err; } -static void +static int bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid) { @@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned long flags; if (!bnad->rx_info[0].rx) - return; + return 0; mutex_lock(&bnad->conf_mutex); @@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev, spin_unlock_irqrestore(&bnad->bna_lock, flags); mutex_unlock(&bnad->conf_mutex); + + return 0; } -static void +static int bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid) { @@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned long flags; if (!bnad->rx_info[0].rx) - return; + return 0; mutex_lock(&bnad->conf_mutex); @@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, spin_unlock_irqrestore(&bnad->bna_lock, flags); mutex_unlock(&bnad->conf_mutex); + + return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad) { spin_lock_init(&bnad->bna_lock); mutex_init(&bnad->conf_mutex); + mutex_init(&bnad_list_mutex); } static void bnad_lock_uninit(struct bnad *bnad) { mutex_destroy(&bnad->conf_mutex); + mutex_destroy(&bnad_list_mutex); } /* PCI Initialization */ @@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad, goto disable_device; if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - *using_dac = 1; + *using_dac = true; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { @@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad, if (err) goto release_regions; } - *using_dac = 0; + *using_dac = false; } pci_set_master(pdev); return 0; @@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev, return err; } bnad = netdev_priv(netdev); - bnad_lock_init(bnad); + bnad_add_to_list(bnad); mutex_lock(&bnad->conf_mutex); /* @@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev, /* Set link to down state */ netif_carrier_off(netdev); + /* Setup the debugfs node for this bfad */ + if (bna_debugfs_enable) + bnad_debugfs_init(bnad); + /* Get resource requirement form bna */ spin_lock_irqsave(&bnad->bna_lock, flags); 
bna_res_req(&bnad->res_info[0]); @@ -3398,11 +3442,15 @@ disable_ioceth: res_free: bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); drv_uninit: + /* Remove the debugfs node for this bnad */ + kfree(bnad->regdata); + bnad_debugfs_uninit(bnad); bnad_uninit(bnad); pci_uninit: bnad_pci_uninit(pdev); unlock_mutex: mutex_unlock(&bnad->conf_mutex); + bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); free_netdev(netdev); return err; @@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev) bnad_disable_msix(bnad); bnad_pci_uninit(pdev); mutex_unlock(&bnad->conf_mutex); + bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); + /* Remove the debugfs node for this bnad */ + kfree(bnad->regdata); + bnad_debugfs_uninit(bnad); bnad_uninit(bnad); free_netdev(netdev); } diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 5487ca42d01..55824d92699 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -124,6 +124,12 @@ enum bnad_link_state { BNAD_LS_UP = 1 }; +struct bnad_iocmd_comp { + struct bnad *bnad; + struct completion comp; + int comp_status; +}; + struct bnad_completion { struct completion ioc_comp; struct completion ucast_comp; @@ -251,6 +257,8 @@ struct bnad_unmap_q { struct bnad { struct net_device *netdev; + u32 id; + struct list_head list_entry; /* Data path */ struct bnad_tx_info tx_info[BNAD_MAX_TX]; @@ -320,12 +328,26 @@ struct bnad { char adapter_name[BNAD_NAME_LEN]; char port_name[BNAD_NAME_LEN]; char mbox_irq_name[BNAD_NAME_LEN]; + + /* debugfs specific data */ + char *regdata; + u32 reglen; + struct dentry *bnad_dentry_files[5]; + struct dentry *port_debugfs_root; +}; + +struct bnad_drvinfo { + struct bfa_ioc_attr ioc_attr; + struct bfa_cee_attr cee_attr; + struct bfa_flash_attr flash_attr; + u32 cee_status; + u32 flash_status; }; /* * EXTERN VARIABLES */ -extern struct firmware *bfi_fw; +extern const struct firmware *bfi_fw; extern u32 bnad_rxqs_per_cq; /* @@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); extern int bnad_enable_default_bcast(struct bnad *bnad); extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); extern void bnad_set_ethtool_ops(struct net_device *netdev); +extern void bnad_cb_completion(void *arg, enum bfa_status status); /* Configuration & setup */ extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad); @@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad, extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); +/* Debugfs */ +void bnad_debugfs_init(struct bnad *bnad); +void bnad_debugfs_uninit(struct bnad *bnad); + /** * MACROS */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c new file mode 100644 index 00000000000..592ad3929f5 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -0,0 +1,623 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +#include <linux/debugfs.h> +#include <linux/module.h> +#include "bnad.h" + +/* + * BNA debufs interface + * + * To access the interface, debugfs file system should be mounted + * if not already mounted using: + * mount -t debugfs none /sys/kernel/debug + * + * BNA Hierarchy: + * - bna/pci_dev:<pci_name> + * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna + * + * Debugging service available per pci_dev: + * fwtrc: To collect current firmware trace. + * fwsave: To collect last saved fw trace as a result of firmware crash. + * regwr: To write one word to chip register + * regrd: To read one or more words from chip register. + */ + +struct bnad_debug_info { + char *debug_buffer; + void *i_private; + int buffer_len; +}; + +static int +bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; + + fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + fw_debug = NULL; + pr_warn("bna %s: Failed to allocate fwtrc buffer\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (rc != BFA_STATUS_OK) { + kfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + fw_debug = NULL; + pr_warn("bnad %s: Failed to collect fwtrc\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bnad_debugfs_open_fwsave(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; + + fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + fw_debug = NULL; + pr_warn("bna %s: Failed to allocate fwsave buffer\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) { + kfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + fw_debug = NULL; + pr_warn("bna %s: Failed to collect fwsave\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bnad_debugfs_open_reg(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *reg_debug; + + reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!reg_debug) + return -ENOMEM; + + reg_debug->i_private = inode->i_private; + + file->private_data = reg_debug; + + return 0; +} + +static int +bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len) +{ + struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer; + struct bnad_iocmd_comp fcomp; + 
unsigned long flags = 0; + int ret = BFA_STATUS_FAILED; + + /* Get IOC info */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Retrieve CEE related info */ + fcomp.bnad = bnad; + fcomp.comp_status = 0; + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto out; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + drvinfo->cee_status = fcomp.comp_status; + + /* Retrieve flash partition info */ + fcomp.comp_status = 0; + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto out; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + drvinfo->flash_status = fcomp.comp_status; +out: + return ret; +} + +static int +bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *drv_info; + int rc; + + drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!drv_info) + return -ENOMEM; + + drv_info->buffer_len = sizeof(struct bnad_drvinfo); + + drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL); + if (!drv_info->debug_buffer) { + kfree(drv_info); + drv_info = NULL; + pr_warn("bna %s: Failed to allocate drv info buffer\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + mutex_lock(&bnad->conf_mutex); + rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer, + drv_info->buffer_len); + mutex_unlock(&bnad->conf_mutex); + if (rc != BFA_STATUS_OK) { + kfree(drv_info->debug_buffer); + drv_info->debug_buffer = NULL; + kfree(drv_info); + drv_info = NULL; + pr_warn("bna %s: Failed to collect drvinfo\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + file->private_data = drv_info; + + return 0; +} + +/* Changes the current file position */ +static loff_t +bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) +{ + loff_t pos = file->f_pos; + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return -EINVAL; + + switch (orig) { + case 0: + file->f_pos = offset; + break; + case 1: + file->f_pos += offset; + break; + case 2: + file->f_pos = debug->buffer_len - offset; + break; + default: + return -EINVAL; + } + + if (file->f_pos < 0 || file->f_pos > debug->buffer_len) { + file->f_pos = pos; + return -EINVAL; + } + + return file->f_pos; +} + +static ssize_t +bnad_debugfs_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug || !debug->debug_buffer) + return 0; + + return simple_read_from_buffer(buf, nbytes, pos, + debug->debug_buffer, debug->buffer_len); +} + +#define BFA_REG_CT_ADDRSZ (0x40000) +#define BFA_REG_CB_ADDRSZ (0x20000) +#define BFA_REG_ADDRSZ(__ioc) \ + ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \ + BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) +#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) + +/* + * Function to check if the register offset passed is valid. 
+ */ +static int +bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len) +{ + u8 area; + + /* check [16:15] */ + area = (offset >> 15) & 0x7; + if (area == 0) { + /* PCIe core register */ + if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else if (area == 0x1) { + /* CB 32 KB memory page */ + if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else { + /* CB register space 64KB */ + if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc)) + return BFA_STATUS_EINVAL; + } + return BFA_STATUS_OK; +} + +static ssize_t +bnad_debugfs_read_regrd(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bnad_debug_info *regrd_debug = file->private_data; + struct bnad *bnad = (struct bnad *)regrd_debug->i_private; + ssize_t rc; + + if (!bnad->regdata) + return 0; + + rc = simple_read_from_buffer(buf, nbytes, pos, + bnad->regdata, bnad->reglen); + + if ((*pos + nbytes) >= bnad->reglen) { + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + } + + return rc; +} + +static ssize_t +bnad_debugfs_write_regrd(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bnad_debug_info *regrd_debug = file->private_data; + struct bnad *bnad = (struct bnad *)regrd_debug->i_private; + struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; + int addr, len, rc, i; + u32 *regbuf; + void __iomem *rb, *reg_addr; + unsigned long flags; + void *kern_buf; + + /* Allocate memory to store the user space buf */ + kern_buf = kzalloc(nbytes, GFP_KERNEL); + if (!kern_buf) { + pr_warn("bna %s: Failed to allocate user buffer\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { + kfree(kern_buf); + return -ENOMEM; + } + + rc = sscanf(kern_buf, "%x:%x", &addr, &len); + if (rc < 2) { + pr_warn("bna %s: Failed to read user buffer\n", + pci_name(bnad->pcidev)); + kfree(kern_buf); + return -EINVAL; + } + + kfree(kern_buf); + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + + bnad->regdata = kzalloc(len << 2, GFP_KERNEL); + if (!bnad->regdata) { + pr_warn("bna %s: Failed to allocate regrd buffer\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + bnad->reglen = len << 2; + rb = bfa_ioc_bar0(ioc); + addr &= BFA_REG_ADDRMSK(ioc); + + /* offset and len sanity check */ + rc = bna_reg_offset_check(ioc, addr, len); + if (rc) { + pr_warn("bna %s: Failed reg offset check\n", + pci_name(bnad->pcidev)); + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + return -EINVAL; + } + + reg_addr = rb + addr; + regbuf = (u32 *)bnad->regdata; + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < len; i++) { + *regbuf = readl(reg_addr); + regbuf++; + reg_addr += sizeof(u32); + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return nbytes; +} + +static ssize_t +bnad_debugfs_write_regwr(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bnad_debug_info *debug = file->private_data; + struct bnad *bnad = (struct bnad *)debug->i_private; + struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; + int addr, val, rc; + void __iomem *reg_addr; + unsigned long flags; + void *kern_buf; + + /* Allocate memory to store the user space buf */ + kern_buf = kzalloc(nbytes, GFP_KERNEL); + if (!kern_buf) { + pr_warn("bna %s: Failed to allocate user buffer\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { + kfree(kern_buf); + return 
-ENOMEM; + } + + rc = sscanf(kern_buf, "%x:%x", &addr, &val); + if (rc < 2) { + pr_warn("bna %s: Failed to read user buffer\n", + pci_name(bnad->pcidev)); + kfree(kern_buf); + return -EINVAL; + } + kfree(kern_buf); + + addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ + + /* offset and len sanity check */ + rc = bna_reg_offset_check(ioc, addr, 1); + if (rc) { + pr_warn("bna %s: Failed reg offset check\n", + pci_name(bnad->pcidev)); + return -EINVAL; + } + + reg_addr = (bfa_ioc_bar0(ioc)) + addr; + spin_lock_irqsave(&bnad->bna_lock, flags); + writel(val, reg_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return nbytes; +} + +static int +bnad_debugfs_release(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + file->private_data = NULL; + kfree(debug); + return 0; +} + +static int +bnad_debugfs_buffer_release(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + kfree(debug->debug_buffer); + + file->private_data = NULL; + kfree(debug); + debug = NULL; + return 0; +} + +static const struct file_operations bnad_debugfs_op_fwtrc = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_fwtrc, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +static const struct file_operations bnad_debugfs_op_fwsave = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_fwsave, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +static const struct file_operations bnad_debugfs_op_regrd = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_reg, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read_regrd, + .write = bnad_debugfs_write_regrd, + .release = bnad_debugfs_release, +}; + +static const struct file_operations bnad_debugfs_op_regwr = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_reg, + .llseek = bnad_debugfs_lseek, + .write = bnad_debugfs_write_regwr, + .release = bnad_debugfs_release, +}; + +static const struct file_operations bnad_debugfs_op_drvinfo = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_drvinfo, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +struct bnad_debugfs_entry { + const char *name; + mode_t mode; + const struct file_operations *fops; +}; + +static const struct bnad_debugfs_entry bnad_debugfs_files[] = { + { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, }, + { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, }, + { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, }, + { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, }, + { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, }, +}; + +static struct dentry *bna_debugfs_root; +static atomic_t bna_debugfs_port_count; + +/* Initialize debugfs interface for BNA */ +void +bnad_debugfs_init(struct bnad *bnad) +{ + const struct bnad_debugfs_entry *file; + char name[64]; + int i; + + /* Setup the BNA debugfs root directory*/ + if (!bna_debugfs_root) { + bna_debugfs_root = debugfs_create_dir("bna", NULL); + atomic_set(&bna_debugfs_port_count, 0); + if (!bna_debugfs_root) { + pr_warn("BNA: debugfs root dir creation failed\n"); + return; + } + } + + /* Setup the pci_dev debugfs directory for the port */ + snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev)); + if (!bnad->port_debugfs_root) { + bnad->port_debugfs_root = + 
debugfs_create_dir(name, bna_debugfs_root); + if (!bnad->port_debugfs_root) { + pr_warn("bna pci_dev %s: root dir creation failed\n", + pci_name(bnad->pcidev)); + return; + } + + atomic_inc(&bna_debugfs_port_count); + + for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { + file = &bnad_debugfs_files[i]; + bnad->bnad_dentry_files[i] = + debugfs_create_file(file->name, + file->mode, + bnad->port_debugfs_root, + bnad, + file->fops); + if (!bnad->bnad_dentry_files[i]) { + pr_warn( + "BNA pci_dev:%s: create %s entry failed\n", + pci_name(bnad->pcidev), file->name); + return; + } + } + } +} + +/* Uninitialize debugfs interface for BNA */ +void +bnad_debugfs_uninit(struct bnad *bnad) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { + if (bnad->bnad_dentry_files[i]) { + debugfs_remove(bnad->bnad_dentry_files[i]); + bnad->bnad_dentry_files[i] = NULL; + } + } + + /* Remove the pci_dev debugfs directory for the port */ + if (bnad->port_debugfs_root) { + debugfs_remove(bnad->port_debugfs_root); + bnad->port_debugfs_root = NULL; + atomic_dec(&bna_debugfs_port_count); + } + + /* Remove the BNA debugfs root directory */ + if (atomic_read(&bna_debugfs_port_count) == 0) { + debugfs_remove(bna_debugfs_root); + bna_debugfs_root = NULL; + } +} diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index fd3dcc1e914..9b44ec8096b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -38,7 +38,7 @@ sizeof(struct bnad_drv_stats) / sizeof(u64) + \ offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) -static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { +static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "rx_packets", "tx_packets", "rx_bytes", @@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) struct bfa_ioc_attr *ioc_attr; unsigned long flags; - strcpy(drvinfo->driver, BNAD_NAME); - strcpy(drvinfo->version, BNAD_VERSION); + strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version)); ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); if (ioc_attr) { @@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); spin_unlock_irqrestore(&bnad->bna_lock, flags); - strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, - sizeof(drvinfo->fw_version) - 1); + strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, + sizeof(drvinfo->fw_version)); kfree(ioc_attr); } - strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN); + strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev), + sizeof(drvinfo->bus_info)); } static void @@ -934,7 +935,144 @@ bnad_get_sset_count(struct net_device *netdev, int sset) } } -static struct ethtool_ops bnad_ethtool_ops = { +static u32 +bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, + u32 *base_offset) +{ + struct bfa_flash_attr *flash_attr; + struct bnad_iocmd_comp fcomp; + u32 i, flash_part = 0, ret; + unsigned long flags = 0; + + flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL); + if (!flash_attr) + return -ENOMEM; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { 
+ spin_unlock_irqrestore(&bnad->bna_lock, flags); + kfree(flash_attr); + goto out_err; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; + + /* Check for the flash type & base offset value */ + if (ret == BFA_STATUS_OK) { + for (i = 0; i < flash_attr->npart; i++) { + if (offset >= flash_attr->part[i].part_off && + offset < (flash_attr->part[i].part_off + + flash_attr->part[i].part_size)) { + flash_part = flash_attr->part[i].part_type; + *base_offset = flash_attr->part[i].part_off; + break; + } + } + } + kfree(flash_attr); + return flash_part; +out_err: + return -EINVAL; +} + +static int +bnad_get_eeprom_len(struct net_device *netdev) +{ + return BFA_TOTAL_FLASH_SIZE; +} + +static int +bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + u32 flash_part = 0, base_offset = 0; + unsigned long flags = 0; + int ret = 0; + + /* Check if the flash read request is valid */ + if (eeprom->magic != (bnad->pcidev->vendor | + (bnad->pcidev->device << 16))) + return -EFAULT; + + /* Query the flash partition based on the offset */ + flash_part = bnad_get_flash_partition_by_offset(bnad, + eeprom->offset, &base_offset); + if (flash_part <= 0) + return -EFAULT; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, + bnad->id, bytes, eeprom->len, + eeprom->offset - base_offset, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; +done: + return ret; +} + +static int +bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + u32 flash_part = 0, base_offset = 0; + unsigned long flags = 0; + int ret = 0; + + /* Check if the flash update request is valid */ + if (eeprom->magic != (bnad->pcidev->vendor | + (bnad->pcidev->device << 16))) + return -EINVAL; + + /* Query the flash partition based on the offset */ + flash_part = bnad_get_flash_partition_by_offset(bnad, + eeprom->offset, &base_offset); + if (flash_part <= 0) + return -EFAULT; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, + bnad->id, bytes, eeprom->len, + eeprom->offset - base_offset, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; +done: + return ret; +} + +static const struct ethtool_ops bnad_ethtool_ops = { .get_settings = bnad_get_settings, .set_settings = bnad_set_settings, .get_drvinfo = bnad_get_drvinfo, @@ -948,7 +1086,10 @@ static struct ethtool_ops bnad_ethtool_ops = { .set_pauseparam = bnad_set_pauseparam, .get_strings = bnad_get_strings, .get_ethtool_stats = bnad_get_ethtool_stats, - .get_sset_count = bnad_get_sset_count + .get_sset_count = bnad_get_sset_count, + .get_eeprom_len = bnad_get_eeprom_len, + .get_eeprom = bnad_get_eeprom, + .set_eeprom = bnad_set_eeprom, }; void diff --git 
a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index 1b3e90dfbd9..32e8f178ab7 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -43,8 +43,7 @@ extern char bfa_version[]; #pragma pack(1) -#define MAC_ADDRLEN (6) -typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t; +typedef struct mac { u8 mac[ETH_ALEN]; } mac_t; #pragma pack() diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index 725b9fff337..cfc22a64157 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -16,6 +16,7 @@ * www.brocade.com */ #include <linux/firmware.h> +#include "bnad.h" #include "bfi.h" #include "cna.h" diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig new file mode 100644 index 00000000000..aba435c3d4a --- /dev/null +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -0,0 +1,7 @@ +config NET_CALXEDA_XGMAC + tristate "Calxeda 1G/10G XGMAC Ethernet driver" + depends on HAS_IOMEM + select CRC32 + help + This is the driver for the XGMAC Ethernet IP block found on Calxeda + Highbank platforms. diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile new file mode 100644 index 00000000000..f0ef08067f9 --- /dev/null +++ b/drivers/net/ethernet/calxeda/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c new file mode 100644 index 00000000000..1fce186a903 --- /dev/null +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -0,0 +1,1928 @@ +/* + * Copyright 2010-2011 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/circ_buf.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if.h> +#include <linux/crc32.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> + +/* XGMAC Register definitions */ +#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ +#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ +#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ +#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ +#define XGMAC_VERSION 0x00000020 /* Version */ +#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ +#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ +#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ +#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ +#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ +#define XGMAC_DEBUG 0x00000038 /* Debug */ +#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ +#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) +#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) +#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ +#define XGMAC_NUM_HASH 16 +#define XGMAC_OMR 0x00000400 +#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ +#define XGMAC_PMT 0x00000704 /* PMT Control and Status */ +#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ +#define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */ +#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ +#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */ +#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ + +/* Hardware TX Statistics Counters */ +#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 +#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 +#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C +#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 +#define XGMAC_MMC_TXBCFRAME_G 0x00000824 +#define XGMAC_MMC_TXMCFRAME_G 0x0000082C +#define XGMAC_MMC_TXUCFRAME_GB 0x00000864 +#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C +#define XGMAC_MMC_TXBCFRAME_GB 0x00000874 +#define XGMAC_MMC_TXUNDERFLOW 0x0000087C +#define XGMAC_MMC_TXOCTET_G_LO 0x00000884 +#define XGMAC_MMC_TXOCTET_G_HI 0x00000888 +#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C +#define XGMAC_MMC_TXFRAME_G_HI 0x00000890 +#define XGMAC_MMC_TXPAUSEFRAME 0x00000894 +#define XGMAC_MMC_TXVLANFRAME 0x0000089C + +/* Hardware RX Statistics Counters */ +#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 +#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 +#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 +#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C +#define XGMAC_MMC_RXOCTET_G_LO 0x00000910 +#define XGMAC_MMC_RXOCTET_G_HI 0x00000914 +#define XGMAC_MMC_RXBCFRAME_G 0x00000918 +#define XGMAC_MMC_RXMCFRAME_G 0x00000920 +#define XGMAC_MMC_RXCRCERR 0x00000928 +#define XGMAC_MMC_RXRUNT 0x00000930 +#define XGMAC_MMC_RXJABBER 0x00000934 +#define XGMAC_MMC_RXUCFRAME_G 0x00000970 +#define XGMAC_MMC_RXLENGTHERR 0x00000978 +#define XGMAC_MMC_RXPAUSEFRAME 0x00000988 +#define XGMAC_MMC_RXOVERFLOW 0x00000990 +#define XGMAC_MMC_RXVLANFRAME 0x00000998 +#define XGMAC_MMC_RXWATCHDOG 0x000009a0 + +/* DMA Control and Status Registers */ +#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ +#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ +#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */ +#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c 
/* Receive List Base */ +#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ +#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ +#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ +#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ +#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ +#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ +#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ +#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ +#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ + +#define XGMAC_ADDR_AE 0x80000000 +#define XGMAC_MAX_FILTER_ADDR 31 + +/* PMT Control and Status */ +#define XGMAC_PMT_POINTER_RESET 0x80000000 +#define XGMAC_PMT_GLBL_UNICAST 0x00000200 +#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 +#define XGMAC_PMT_MAGIC_PKT 0x00000020 +#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 +#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 +#define XGMAC_PMT_POWERDOWN 0x00000001 + +#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ +#define XGMAC_CONTROL_SPD_MASK 0x60000000 +#define XGMAC_CONTROL_SPD_1G 0x60000000 +#define XGMAC_CONTROL_SPD_2_5G 0x40000000 +#define XGMAC_CONTROL_SPD_10G 0x00000000 +#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ +#define XGMAC_CONTROL_SARK_MASK 0x18000000 +#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ +#define XGMAC_CONTROL_CAR_MASK 0x06000000 +#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ +#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ +#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ +#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ +#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +/* XGMAC Frame Filter defines */ +#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ +#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ +#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ + +/* XGMAC FLOW CTRL defines */ +#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define XGMAC_FLOW_CTRL_PT_SHIFT 16 +#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ +#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */ +#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ +#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ +#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ + +/* XGMAC_INT_STAT reg */ +#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ +#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ + +/* Programmable burst length */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_8PBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA Control register defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ +#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ + +/* DMA Normal interrupt */ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TUE) + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ + DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \ + DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \ + DMA_INTR_ENA_TSE) + +/* DMA default interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) + +/* DMA Status register defines */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 
0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + +/* Common MAC defines */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ + +/* XGMAC Operation Mode Register */ +#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ +#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ +#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshhold Ctrl */ +#define XGMAC_OMR_TTC_MASK 0x00030000 +#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshhold */ +#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshhold MASK */ +#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshhold */ +#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshhold MASK */ +#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ +#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ +#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ +#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ +#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */ +#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ + +/* XGMAC HW Features Register */ +#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ + +#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008 + +/* XGMAC Descriptor Defines */ +#define MAX_DESC_BUF_SZ (0x2000 - 8) + +#define RXDESC_EXT_STATUS 0x00000001 +#define RXDESC_CRC_ERR 0x00000002 +#define RXDESC_RX_ERR 0x00000008 +#define RXDESC_RX_WDOG 0x00000010 +#define RXDESC_FRAME_TYPE 0x00000020 +#define RXDESC_GIANT_FRAME 0x00000080 +#define RXDESC_LAST_SEG 0x00000100 +#define RXDESC_FIRST_SEG 0x00000200 +#define RXDESC_VLAN_FRAME 0x00000400 +#define RXDESC_OVERFLOW_ERR 0x00000800 +#define RXDESC_LENGTH_ERR 0x00001000 +#define RXDESC_SA_FILTER_FAIL 0x00002000 +#define RXDESC_DESCRIPTOR_ERR 0x00004000 +#define RXDESC_ERROR_SUMMARY 0x00008000 +#define RXDESC_FRAME_LEN_OFFSET 16 +#define RXDESC_FRAME_LEN_MASK 0x3fff0000 +#define RXDESC_DA_FILTER_FAIL 0x40000000 + +#define RXDESC1_END_RING 0x00008000 + +#define RXDESC_IP_PAYLOAD_MASK 0x00000003 +#define RXDESC_IP_PAYLOAD_UDP 0x00000001 +#define RXDESC_IP_PAYLOAD_TCP 0x00000002 +#define RXDESC_IP_PAYLOAD_ICMP 0x00000003 +#define RXDESC_IP_HEADER_ERR 0x00000008 +#define RXDESC_IP_PAYLOAD_ERR 0x00000010 +#define RXDESC_IPV4_PACKET 0x00000040 +#define RXDESC_IPV6_PACKET 0x00000080 +#define TXDESC_UNDERFLOW_ERR 0x00000001 +#define TXDESC_JABBER_TIMEOUT 0x00000002 +#define TXDESC_LOCAL_FAULT 0x00000004 +#define TXDESC_REMOTE_FAULT 0x00000008 +#define TXDESC_VLAN_FRAME 0x00000010 +#define TXDESC_FRAME_FLUSHED 0x00000020 +#define TXDESC_IP_HEADER_ERR 0x00000040 +#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080 +#define TXDESC_ERROR_SUMMARY 0x00008000 +#define TXDESC_SA_CTRL_INSERT 0x00040000 +#define TXDESC_SA_CTRL_REPLACE 0x00080000 +#define TXDESC_2ND_ADDR_CHAINED 0x00100000 +#define TXDESC_END_RING 0x00200000 +#define TXDESC_CSUM_IP 0x00400000 +#define TXDESC_CSUM_IP_PAYLD 0x00800000 +#define TXDESC_CSUM_ALL 0x00C00000 
+#define TXDESC_CRC_EN_REPLACE 0x01000000 +#define TXDESC_CRC_EN_APPEND 0x02000000 +#define TXDESC_DISABLE_PAD 0x04000000 +#define TXDESC_FIRST_SEG 0x10000000 +#define TXDESC_LAST_SEG 0x20000000 +#define TXDESC_INTERRUPT 0x40000000 + +#define DESC_OWN 0x80000000 +#define DESC_BUFFER1_SZ_MASK 0x00001fff +#define DESC_BUFFER2_SZ_MASK 0x1fff0000 +#define DESC_BUFFER2_SZ_OFFSET 16 + +struct xgmac_dma_desc { + __le32 flags; + __le32 buf_size; + __le32 buf1_addr; /* Buffer 1 Address Pointer */ + __le32 buf2_addr; /* Buffer 2 Address Pointer */ + __le32 ext_status; + __le32 res[3]; +}; + +struct xgmac_extra_stats { + /* Transmit errors */ + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + unsigned long tx_local_fault; + unsigned long tx_remote_fault; + /* Receive errors */ + unsigned long rx_watchdog; + unsigned long rx_da_filter_fail; + unsigned long rx_sa_filter_fail; + unsigned long rx_payload_error; + unsigned long rx_ip_header_error; + /* Tx/Rx IRQ errors */ + unsigned long tx_undeflow; + unsigned long tx_process_stopped; + unsigned long rx_buf_unav; + unsigned long rx_process_stopped; + unsigned long tx_early; + unsigned long fatal_bus_error; +}; + +struct xgmac_priv { + struct xgmac_dma_desc *dma_rx; + struct sk_buff **rx_skbuff; + unsigned int rx_tail; + unsigned int rx_head; + + struct xgmac_dma_desc *dma_tx; + struct sk_buff **tx_skbuff; + unsigned int tx_head; + unsigned int tx_tail; + + void __iomem *base; + struct sk_buff_head rx_recycle; + unsigned int dma_buf_sz; + dma_addr_t dma_rx_phy; + dma_addr_t dma_tx_phy; + + struct net_device *dev; + struct device *device; + struct napi_struct napi; + + struct xgmac_extra_stats xstats; + + spinlock_t stats_lock; + int pmt_irq; + char rx_pause; + char tx_pause; + int wolopts; +}; + +/* XGMAC Configuration Settings */ +#define MAX_MTU 9000 +#define PAUSE_TIME 0x400 + +#define DMA_RX_RING_SZ 256 +#define DMA_TX_RING_SZ 128 +/* minimum number of free TX descriptors required to wake up TX process */ +#define TX_THRESH (DMA_TX_RING_SZ/4) + +/* DMA descriptor ring helpers */ +#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) +#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) +#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) + +/* XGMAC Descriptor Access Helpers */ +static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) +{ + if (buf_sz > MAX_DESC_BUF_SZ) + p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ | + (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET); + else + p->buf_size = cpu_to_le32(buf_sz); +} + +static inline int desc_get_buf_len(struct xgmac_dma_desc *p) +{ + u32 len = cpu_to_le32(p->flags); + return (len & DESC_BUFFER1_SZ_MASK) + + ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); +} + +static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size, + int buf_sz) +{ + struct xgmac_dma_desc *end = p + ring_size - 1; + + memset(p, 0, sizeof(*p) * ring_size); + + for (; p <= end; p++) + desc_set_buf_len(p, buf_sz); + + end->buf_size |= cpu_to_le32(RXDESC1_END_RING); +} + +static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size) +{ + memset(p, 0, sizeof(*p) * ring_size); + p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING); +} + +static inline int desc_get_owner(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & DESC_OWN; +} + +static inline void desc_set_rx_owner(struct xgmac_dma_desc *p) +{ + /* Clear all fields and set the owner */ + p->flags = cpu_to_le32(DESC_OWN); +} + +static 
inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) +{ + u32 tmpflags = le32_to_cpu(p->flags); + tmpflags &= TXDESC_END_RING; + tmpflags |= flags | DESC_OWN; + p->flags = cpu_to_le32(tmpflags); +} + +static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; +} + +static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->buf1_addr); +} + +static inline void desc_set_buf_addr(struct xgmac_dma_desc *p, + u32 paddr, int len) +{ + p->buf1_addr = cpu_to_le32(paddr); + if (len > MAX_DESC_BUF_SZ) + p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ); +} + +static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p, + u32 paddr, int len) +{ + desc_set_buf_len(p, len); + desc_set_buf_addr(p, paddr, len); +} + +static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p) +{ + u32 data = le32_to_cpu(p->flags); + u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET; + if (data & RXDESC_FRAME_TYPE) + len -= ETH_FCS_LEN; + + return len; +} + +static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr) +{ + int timeout = 1000; + u32 reg = readl(ioaddr + XGMAC_OMR); + writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR); + + while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF) + udelay(1); +} + +static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) +{ + struct xgmac_extra_stats *x = &priv->xstats; + u32 status = le32_to_cpu(p->flags); + + if (!(status & TXDESC_ERROR_SUMMARY)) + return 0; + + netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status); + if (status & TXDESC_JABBER_TIMEOUT) + x->tx_jabber++; + if (status & TXDESC_FRAME_FLUSHED) + x->tx_frame_flushed++; + if (status & TXDESC_UNDERFLOW_ERR) + xgmac_dma_flush_tx_fifo(priv->base); + if (status & TXDESC_IP_HEADER_ERR) + x->tx_ip_header_error++; + if (status & TXDESC_LOCAL_FAULT) + x->tx_local_fault++; + if (status & TXDESC_REMOTE_FAULT) + x->tx_remote_fault++; + if (status & TXDESC_PAYLOAD_CSUM_ERR) + x->tx_payload_error++; + + return -1; +} + +static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) +{ + struct xgmac_extra_stats *x = &priv->xstats; + int ret = CHECKSUM_UNNECESSARY; + u32 status = le32_to_cpu(p->flags); + u32 ext_status = le32_to_cpu(p->ext_status); + + if (status & RXDESC_DA_FILTER_FAIL) { + netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n"); + x->rx_da_filter_fail++; + return -1; + } + + /* Check if packet has checksum already */ + if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && + !(ext_status & RXDESC_IP_PAYLOAD_MASK)) + ret = CHECKSUM_NONE; + + netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n", + (status & RXDESC_FRAME_TYPE) ? 
1 : 0, ret, ext_status); + + if (!(status & RXDESC_ERROR_SUMMARY)) + return ret; + + /* Handle any errors */ + if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR | + RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR)) + return -1; + + if (status & RXDESC_EXT_STATUS) { + if (ext_status & RXDESC_IP_HEADER_ERR) + x->rx_ip_header_error++; + if (ext_status & RXDESC_IP_PAYLOAD_ERR) + x->rx_payload_error++; + netdev_dbg(priv->dev, "IP checksum error - stat %08x\n", + ext_status); + return CHECKSUM_NONE; + } + + return ret; +} + +static inline void xgmac_mac_enable(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_CONTROL); + value |= MAC_ENABLE_RX | MAC_ENABLE_TX; + writel(value, ioaddr + XGMAC_CONTROL); + + value = readl(ioaddr + XGMAC_DMA_CONTROL); + value |= DMA_CONTROL_ST | DMA_CONTROL_SR; + writel(value, ioaddr + XGMAC_DMA_CONTROL); +} + +static inline void xgmac_mac_disable(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); + value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); + writel(value, ioaddr + XGMAC_DMA_CONTROL); + + value = readl(ioaddr + XGMAC_CONTROL); + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); + writel(value, ioaddr + XGMAC_CONTROL); +} + +static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, + int num) +{ + u32 data; + + data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); + writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + XGMAC_ADDR_LOW(num)); +} + +static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + int num) +{ + u32 hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num)); + lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num)); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} + +static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx) +{ + u32 reg; + unsigned int flow = 0; + + priv->rx_pause = rx; + priv->tx_pause = tx; + + if (rx || tx) { + if (rx) + flow |= XGMAC_FLOW_CTRL_RFE; + if (tx) + flow |= XGMAC_FLOW_CTRL_TFE; + + flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP; + flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT); + + writel(flow, priv->base + XGMAC_FLOW_CTRL); + + reg = readl(priv->base + XGMAC_OMR); + reg |= XGMAC_OMR_EFC; + writel(reg, priv->base + XGMAC_OMR); + } else { + writel(0, priv->base + XGMAC_FLOW_CTRL); + + reg = readl(priv->base + XGMAC_OMR); + reg &= ~XGMAC_OMR_EFC; + writel(reg, priv->base + XGMAC_OMR); + } + + return 0; +} + +static void xgmac_rx_refill(struct xgmac_priv *priv) +{ + struct xgmac_dma_desc *p; + dma_addr_t paddr; + + while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { + int entry = priv->rx_head; + struct sk_buff *skb; + + p = priv->dma_rx + entry; + + if (priv->rx_skbuff[entry] != NULL) + continue; + + skb = __skb_dequeue(&priv->rx_recycle); + if (skb == NULL) + skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); + if (unlikely(skb == NULL)) + break; + + priv->rx_skbuff[entry] = skb; + paddr = dma_map_single(priv->device, skb->data, + priv->dma_buf_sz, DMA_FROM_DEVICE); + desc_set_buf_addr(p, paddr, priv->dma_buf_sz); + + netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", + priv->rx_head, priv->rx_tail); + + priv->rx_head = 
dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); + /* Ensure descriptor is in memory before handing to h/w */ + wmb(); + desc_set_rx_owner(p); + } +} + +/** + * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. + */ +static int xgmac_dma_desc_rings_init(struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + unsigned int bfsize; + + /* Set the Buffer size according to the MTU; + * indeed, in case of jumbo we need to bump-up the buffer sizes. + */ + bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, + 64); + + netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); + + priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ, + GFP_KERNEL); + if (!priv->rx_skbuff) + return -ENOMEM; + + priv->dma_rx = dma_alloc_coherent(priv->device, + DMA_RX_RING_SZ * + sizeof(struct xgmac_dma_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma_rx; + + priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ, + GFP_KERNEL); + if (!priv->tx_skbuff) + goto err_tx_skb; + + priv->dma_tx = dma_alloc_coherent(priv->device, + DMA_TX_RING_SZ * + sizeof(struct xgmac_dma_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + if (!priv->dma_tx) + goto err_dma_tx; + + netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, " + "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", + priv->dma_rx, priv->dma_tx, + (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); + + priv->rx_tail = 0; + priv->rx_head = 0; + priv->dma_buf_sz = bfsize; + desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz); + xgmac_rx_refill(priv); + + priv->tx_tail = 0; + priv->tx_head = 0; + desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); + + writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); + writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR); + + return 0; + +err_dma_tx: + kfree(priv->tx_skbuff); +err_tx_skb: + dma_free_coherent(priv->device, + DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), + priv->dma_rx, priv->dma_rx_phy); +err_dma_rx: + kfree(priv->rx_skbuff); + return -ENOMEM; +} + +static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) +{ + int i; + struct xgmac_dma_desc *p; + + if (!priv->rx_skbuff) + return; + + for (i = 0; i < DMA_RX_RING_SZ; i++) { + if (priv->rx_skbuff[i] == NULL) + continue; + + p = priv->dma_rx + i; + dma_unmap_single(priv->device, desc_get_buf_addr(p), + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + priv->rx_skbuff[i] = NULL; + } +} + +static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) +{ + int i, f; + struct xgmac_dma_desc *p; + + if (!priv->tx_skbuff) + return; + + for (i = 0; i < DMA_TX_RING_SZ; i++) { + if (priv->tx_skbuff[i] == NULL) + continue; + + p = priv->dma_tx + i; + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + + for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { + p = priv->dma_tx + i++; + dma_unmap_page(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + } + + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + } +} + +static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) +{ + /* Release the DMA TX/RX socket buffers */ + xgmac_free_rx_skbufs(priv); + xgmac_free_tx_skbufs(priv); + + /* Free the consistent memory allocated for descriptor rings */ + if (priv->dma_tx) { + 
dma_free_coherent(priv->device, + DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), + priv->dma_tx, priv->dma_tx_phy); + priv->dma_tx = NULL; + } + if (priv->dma_rx) { + dma_free_coherent(priv->device, + DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), + priv->dma_rx, priv->dma_rx_phy); + priv->dma_rx = NULL; + } + kfree(priv->rx_skbuff); + priv->rx_skbuff = NULL; + kfree(priv->tx_skbuff); + priv->tx_skbuff = NULL; +} + +/** + * xgmac_tx: + * @priv: private driver structure + * Description: it reclaims resources after transmission completes. + */ +static void xgmac_tx_complete(struct xgmac_priv *priv) +{ + int i; + void __iomem *ioaddr = priv->base; + + writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS); + + while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { + unsigned int entry = priv->tx_tail; + struct sk_buff *skb = priv->tx_skbuff[entry]; + struct xgmac_dma_desc *p = priv->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (desc_get_owner(p)) + break; + + /* Verify tx error by looking at the last segment */ + if (desc_get_tx_ls(p)) + desc_get_tx_status(priv, p); + + netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", + priv->tx_head, priv->tx_tail); + + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + + priv->tx_skbuff[entry] = NULL; + priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); + + if (!skb) { + continue; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, + DMA_TX_RING_SZ); + p = priv->dma_tx + priv->tx_tail; + + dma_unmap_page(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + } + + /* + * If there's room in the queue (limit it to size) + * we add this skb back into the pool, + * if it's the right size. + */ + if ((skb_queue_len(&priv->rx_recycle) < + DMA_RX_RING_SZ) && + skb_recycle_check(skb, priv->dma_buf_sz)) + __skb_queue_head(&priv->rx_recycle, skb); + else + dev_kfree_skb(skb); + } + + if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > + TX_THRESH) + netif_wake_queue(priv->dev); +} + +/** + * xgmac_tx_err: + * @priv: pointer to the private device structure + * Description: it cleans the descriptors and restarts the transmission + * in case of errors. 
+ */ +static void xgmac_tx_err(struct xgmac_priv *priv) +{ + u32 reg, value, inten; + + netif_stop_queue(priv->dev); + + inten = readl(priv->base + XGMAC_DMA_INTR_ENA); + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + + reg = readl(priv->base + XGMAC_DMA_CONTROL); + writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); + do { + value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000; + } while (value && (value != 0x600000)); + + xgmac_free_tx_skbufs(priv); + desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); + priv->tx_tail = 0; + priv->tx_head = 0; + writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); + + writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, + priv->base + XGMAC_DMA_STATUS); + writel(inten, priv->base + XGMAC_DMA_INTR_ENA); + + netif_wake_queue(priv->dev); +} + +static int xgmac_hw_init(struct net_device *dev) +{ + u32 value, ctrl; + int limit; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + /* Save the ctrl register value */ + ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK; + + /* SW reset */ + value = DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + XGMAC_DMA_BUS_MODE); + limit = 15000; + while (limit-- && + (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + cpu_relax(); + if (limit < 0) + return -EBUSY; + + value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) | + (0x10 << DMA_BUS_MODE_RPBL_SHIFT) | + DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; + writel(value, ioaddr + XGMAC_DMA_BUS_MODE); + + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + + /* XGMAC requires AXI bus init. This is a 'magic number' for now */ + writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS); + + ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | + XGMAC_CONTROL_CAR; + if (dev->features & NETIF_F_RXCSUM) + ctrl |= XGMAC_CONTROL_IPC; + writel(ctrl, ioaddr + XGMAC_CONTROL); + + value = DMA_CONTROL_DFF; + writel(value, ioaddr + XGMAC_DMA_CONTROL); + + /* Set the HW DMA mode and the COE */ + writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA, + ioaddr + XGMAC_OMR); + + /* Reset the MMC counters */ + writel(1, ioaddr + XGMAC_MMC_CTRL); + return 0; +} + +/** + * xgmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int xgmac_open(struct net_device *dev) +{ + int ret; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + /* Check that the MAC address is valid. If it's not, fall back + * to a random address. 
The user can still set a specific + * address using the following Linux command: + * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + netdev_dbg(priv->dev, "generated random MAC address %pM\n", + dev->dev_addr); + } + + skb_queue_head_init(&priv->rx_recycle); + memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); + + /* Initialize the XGMAC and descriptors */ + xgmac_hw_init(dev); + xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); + xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause); + + ret = xgmac_dma_desc_rings_init(dev); + if (ret < 0) + return ret; + + /* Enable the MAC Rx/Tx */ + xgmac_mac_enable(ioaddr); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; +} + +/** + * xgmac_stop - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. + */ +static int xgmac_stop(struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + + if (readl(priv->base + XGMAC_DMA_INTR_ENA)) + napi_disable(&priv->napi); + + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + skb_queue_purge(&priv->rx_recycle); + + /* Disable the MAC core */ + xgmac_mac_disable(priv->base); + + /* Release and free the Rx/Tx resources */ + xgmac_free_dma_desc_rings(priv); + + return 0; +} + +/** + * xgmac_xmit: + * @skb : the socket buffer + * @dev : device pointer + * Description : Tx entry point of the driver. + */ +static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + unsigned int entry; + int i; + int nfrags = skb_shinfo(skb)->nr_frags; + struct xgmac_dma_desc *desc, *first; + unsigned int desc_flags; + unsigned int len; + dma_addr_t paddr; + + if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < + (nfrags + 1)) { + writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE, + priv->base + XGMAC_DMA_INTR_ENA); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 
+ TXDESC_CSUM_ALL : 0; + entry = priv->tx_head; + desc = priv->dma_tx + entry; + first = desc; + + len = skb_headlen(skb); + paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb(skb); + return -EIO; + } + priv->tx_skbuff[entry] = skb; + desc_set_buf_addr_and_size(desc, paddr, len); + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + + paddr = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb(skb); + return -EIO; + } + + entry = dma_ring_incr(entry, DMA_TX_RING_SZ); + desc = priv->dma_tx + entry; + priv->tx_skbuff[entry] = NULL; + + desc_set_buf_addr_and_size(desc, paddr, len); + if (i < (nfrags - 1)) + desc_set_tx_owner(desc, desc_flags); + } + + /* Interrupt on completion only for the last segment */ + if (desc != first) + desc_set_tx_owner(desc, desc_flags | + TXDESC_LAST_SEG | TXDESC_INTERRUPT); + else + desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; + + /* Set owner on first desc last to avoid race condition */ + wmb(); + desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); + + priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); + + writel(1, priv->base + XGMAC_DMA_TX_POLL); + + return NETDEV_TX_OK; +} + +static int xgmac_rx(struct xgmac_priv *priv, int limit) +{ + unsigned int entry; + unsigned int count = 0; + struct xgmac_dma_desc *p; + + while (count < limit) { + int ip_checksum; + struct sk_buff *skb; + int frame_len; + + writel(DMA_STATUS_RI | DMA_STATUS_NIS, + priv->base + XGMAC_DMA_STATUS); + + entry = priv->rx_tail; + p = priv->dma_rx + entry; + if (desc_get_owner(p)) + break; + + count++; + priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); + + /* read the status of the incoming frame */ + ip_checksum = desc_get_rx_status(priv, p); + if (ip_checksum < 0) + continue; + + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); + break; + } + priv->rx_skbuff[entry] = NULL; + + frame_len = desc_get_rx_frame_len(p); + netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", + frame_len, ip_checksum); + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, desc_get_buf_addr(p), + frame_len, DMA_FROM_DEVICE); + + skb->protocol = eth_type_trans(skb, priv->dev); + skb->ip_summed = ip_checksum; + if (ip_checksum == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&priv->napi, skb); + } + + xgmac_rx_refill(priv); + + writel(1, priv->base + XGMAC_DMA_RX_POLL); + + return count; +} + +/** + * xgmac_poll - xgmac poll method (NAPI) + * @napi : pointer to the napi structure. + * @budget : maximum number of packets that the current CPU can receive from + * all interfaces. + * Description : + * This function implements the reception process. + * It also runs the TX completion thread. + */ +static int xgmac_poll(struct napi_struct *napi, int budget) +{ + struct xgmac_priv *priv = container_of(napi, + struct xgmac_priv, napi); + int work_done = 0; + + xgmac_tx_complete(priv); + work_done = xgmac_rx(priv, budget); + + if (work_done < budget) { + napi_complete(napi); + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); + } + return work_done; +} + +/** + * xgmac_tx_timeout + * @dev : Pointer to net device structure + * Description: this function is called when a packet transmission fails to + * complete within a reasonable time. 
The driver will mark the error in the + * netdev structure and arrange for the device to be reset to a sane state + * in order to transmit a new packet. + */ +static void xgmac_tx_timeout(struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + + /* Clear Tx resources and restart transmitting again */ + xgmac_tx_err(priv); +} + +/** + * xgmac_set_rx_mode - entry point for multicast addressing + * @dev : pointer to the device structure + * Description: + * This function is a driver entry point which gets called by the kernel + * whenever multicast addresses must be enabled/disabled. + * Return value: + * void. + */ +static void xgmac_set_rx_mode(struct net_device *dev) +{ + int i; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + unsigned int value = 0; + u32 hash_filter[XGMAC_NUM_HASH]; + int reg = 1; + struct netdev_hw_addr *ha; + bool use_hash = false; + + netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", + netdev_mc_count(dev), netdev_uc_count(dev)); + + if (dev->flags & IFF_PROMISC) { + writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); + return; + } + + memset(hash_filter, 0, sizeof(hash_filter)); + + if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { + use_hash = true; + value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; + } + netdev_for_each_uc_addr(ha, dev) { + if (use_hash) { + u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; + + /* The most significant 4 bits determine the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. */ + hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } else { + xgmac_set_mac_addr(ioaddr, ha->addr, reg); + reg++; + } + } + + if (dev->flags & IFF_ALLMULTI) { + value |= XGMAC_FRAME_FILTER_PM; + goto out; + } + + if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { + use_hash = true; + value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; + } + netdev_for_each_mc_addr(ha, dev) { + if (use_hash) { + u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; + + /* The most significant 4 bits determine the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. */ + hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } else { + xgmac_set_mac_addr(ioaddr, ha->addr, reg); + reg++; + } + } + +out: + for (i = 0; i < XGMAC_NUM_HASH; i++) + writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); + + writel(value, ioaddr + XGMAC_FRAME_FILTER); +} + +/** + * xgmac_change_mtu - entry point to change MTU size for the device. + * @dev : device pointer. + * @new_mtu : the new MTU size for the device. + * Description: the Maximum Transfer Unit (MTU) is used by the network layer + * to drive packet transmission. Ethernet has an MTU of 1500 octets + * (ETH_DATA_LEN). This value can be changed with ifconfig. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. 
+ */ +static int xgmac_change_mtu(struct net_device *dev, int new_mtu) +{ + struct xgmac_priv *priv = netdev_priv(dev); + int old_mtu; + + if ((new_mtu < 46) || (new_mtu > MAX_MTU)) { + netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU); + return -EINVAL; + } + + old_mtu = dev->mtu; + dev->mtu = new_mtu; + + /* return early if the buffer sizes will not change */ + if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) + return 0; + if (old_mtu == new_mtu) + return 0; + + /* Stop everything, get ready to change the MTU */ + if (!netif_running(dev)) + return 0; + + /* Bring the interface down and then back up */ + xgmac_stop(dev); + return xgmac_open(dev); +} + +static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) +{ + u32 intr_status; + struct net_device *dev = (struct net_device *)dev_id; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + intr_status = readl(ioaddr + XGMAC_INT_STAT); + if (intr_status & XGMAC_INT_STAT_PMT) { + netdev_dbg(priv->dev, "received Magic frame\n"); + /* clear the PMT bits 5 and 6 by reading the PMT */ + readl(ioaddr + XGMAC_PMT); + } + return IRQ_HANDLED; +} + +static irqreturn_t xgmac_interrupt(int irq, void *dev_id) +{ + u32 intr_status; + bool tx_err = false; + struct net_device *dev = (struct net_device *)dev_id; + struct xgmac_priv *priv = netdev_priv(dev); + struct xgmac_extra_stats *x = &priv->xstats; + + /* read the status register (CSR5) */ + intr_status = readl(priv->base + XGMAC_DMA_STATUS); + intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); + writel(intr_status, priv->base + XGMAC_DMA_STATUS); + + /* It displays the DMA process states (CSR5 register) */ + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + if (intr_status & DMA_STATUS_TJT) { + netdev_err(priv->dev, "transmit jabber\n"); + x->tx_jabber++; + } + if (intr_status & DMA_STATUS_RU) + x->rx_buf_unav++; + if (intr_status & DMA_STATUS_RPS) { + netdev_err(priv->dev, "receive process stopped\n"); + x->rx_process_stopped++; + } + if (intr_status & DMA_STATUS_ETI) { + netdev_err(priv->dev, "transmit early interrupt\n"); + x->tx_early++; + } + if (intr_status & DMA_STATUS_TPS) { + netdev_err(priv->dev, "transmit process stopped\n"); + x->tx_process_stopped++; + tx_err = true; + } + if (intr_status & DMA_STATUS_FBI) { + netdev_err(priv->dev, "fatal bus error\n"); + x->fatal_bus_error++; + tx_err = true; + } + + if (tx_err) + xgmac_tx_err(priv); + } + + /* TX/RX NORMAL interrupts */ + if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { + writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling receive - used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. 
*/ +static void xgmac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + xgmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static struct rtnl_link_stats64 * +xgmac_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *base = priv->base; + u32 count; + + spin_lock_bh(&priv->stats_lock); + writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); + + storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); + storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; + + storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); + storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); + storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); + storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); + storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); + + storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); + storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; + + count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); + storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); + storage->tx_packets = count; + storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); + + writel(0, base + XGMAC_MMC_CTRL); + spin_unlock_bh(&priv->stats_lock); + return storage; +} + +static int xgmac_set_mac_address(struct net_device *dev, void *p) +{ + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); + + return 0; +} + +static int xgmac_set_features(struct net_device *dev, netdev_features_t features) +{ + u32 ctrl; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + u32 changed = dev->features ^ features; + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + ctrl = readl(ioaddr + XGMAC_CONTROL); + if (features & NETIF_F_RXCSUM) + ctrl |= XGMAC_CONTROL_IPC; + else + ctrl &= ~XGMAC_CONTROL_IPC; + writel(ctrl, ioaddr + XGMAC_CONTROL); + + return 0; +} + +static const struct net_device_ops xgmac_netdev_ops = { + .ndo_open = xgmac_open, + .ndo_start_xmit = xgmac_xmit, + .ndo_stop = xgmac_stop, + .ndo_change_mtu = xgmac_change_mtu, + .ndo_set_rx_mode = xgmac_set_rx_mode, + .ndo_tx_timeout = xgmac_tx_timeout, + .ndo_get_stats64 = xgmac_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = xgmac_poll_controller, +#endif + .ndo_set_mac_address = xgmac_set_mac_address, + .ndo_set_features = xgmac_set_features, +}; + +static int xgmac_ethtool_getsettings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + cmd->autoneg = 0; + cmd->duplex = DUPLEX_FULL; + ethtool_cmd_speed_set(cmd, 10000); + cmd->supported = 0; + cmd->advertising = 0; + cmd->transceiver = XCVR_INTERNAL; + return 0; +} + +static void xgmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct xgmac_priv *priv = netdev_priv(netdev); + + pause->rx_pause = priv->rx_pause; + pause->tx_pause = priv->tx_pause; +} + +static int xgmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct xgmac_priv *priv = netdev_priv(netdev); + + if (pause->autoneg) + return -EINVAL; + + return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); +} + +struct xgmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int 
stat_offset; + bool is_reg; +}; + +#define XGMAC_STAT(m) \ + { #m, offsetof(struct xgmac_priv, xstats.m), false } +#define XGMAC_HW_STAT(m, reg_offset) \ + { #m, reg_offset, true } + +static const struct xgmac_stats xgmac_gstrings_stats[] = { + XGMAC_STAT(tx_frame_flushed), + XGMAC_STAT(tx_payload_error), + XGMAC_STAT(tx_ip_header_error), + XGMAC_STAT(tx_local_fault), + XGMAC_STAT(tx_remote_fault), + XGMAC_STAT(tx_early), + XGMAC_STAT(tx_process_stopped), + XGMAC_STAT(tx_jabber), + XGMAC_STAT(rx_buf_unav), + XGMAC_STAT(rx_process_stopped), + XGMAC_STAT(rx_payload_error), + XGMAC_STAT(rx_ip_header_error), + XGMAC_STAT(rx_da_filter_fail), + XGMAC_STAT(rx_sa_filter_fail), + XGMAC_STAT(fatal_bus_error), + XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), + XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), + XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), + XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), + XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), +}; +#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) + +static void xgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, + u64 *data) +{ + struct xgmac_priv *priv = netdev_priv(dev); + void *p = priv; + int i; + + for (i = 0; i < XGMAC_STATS_LEN; i++) { + if (xgmac_gstrings_stats[i].is_reg) + *data++ = readl(priv->base + + xgmac_gstrings_stats[i].stat_offset); + else + *data++ = *(u32 *)(p + + xgmac_gstrings_stats[i].stat_offset); + } +} + +static int xgmac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return XGMAC_STATS_LEN; + default: + return -EINVAL; + } +} + +static void xgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < XGMAC_STATS_LEN; i++) { + memcpy(p, xgmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +static void xgmac_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct xgmac_priv *priv = netdev_priv(dev); + + if (device_can_wakeup(priv->device)) { + wol->supported = WAKE_MAGIC | WAKE_UCAST; + wol->wolopts = priv->wolopts; + } +} + +static int xgmac_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct xgmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC | WAKE_UCAST; + + if (!device_can_wakeup(priv->device)) + return -ENOTSUPP; + + if (wol->wolopts & ~support) + return -EINVAL; + + priv->wolopts = wol->wolopts; + + if (wol->wolopts) { + device_set_wakeup_enable(priv->device, 1); + enable_irq_wake(dev->irq); + } else { + device_set_wakeup_enable(priv->device, 0); + disable_irq_wake(dev->irq); + } + + return 0; +} + +static const struct ethtool_ops xgmac_ethtool_ops = { + .get_settings = xgmac_ethtool_getsettings, + .get_link = ethtool_op_get_link, + .get_pauseparam = xgmac_get_pauseparam, + .set_pauseparam = xgmac_set_pauseparam, + .get_ethtool_stats = xgmac_get_ethtool_stats, + .get_strings = xgmac_get_strings, + .get_wol = xgmac_get_wol, + .set_wol = xgmac_set_wol, + .get_sset_count = xgmac_get_sset_count, +}; + +/** + * xgmac_probe + * @pdev: platform device pointer + * Description: the driver is initialized through platform_device. 
+ */ +static int xgmac_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + struct net_device *ndev = NULL; + struct xgmac_priv *priv = NULL; + u32 uid; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) + return -EBUSY; + + ndev = alloc_etherdev(sizeof(struct xgmac_priv)); + if (!ndev) { + ret = -ENOMEM; + goto err_alloc; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + priv = netdev_priv(ndev); + platform_set_drvdata(pdev, ndev); + ether_setup(ndev); + ndev->netdev_ops = &xgmac_netdev_ops; + SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); + spin_lock_init(&priv->stats_lock); + + priv->device = &pdev->dev; + priv->dev = ndev; + priv->rx_pause = 1; + priv->tx_pause = 1; + + priv->base = ioremap(res->start, resource_size(res)); + if (!priv->base) { + netdev_err(ndev, "ioremap failed\n"); + ret = -ENOMEM; + goto err_io; + } + + uid = readl(priv->base + XGMAC_VERSION); + netdev_info(ndev, "h/w version is 0x%x\n", uid); + + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq == -ENXIO) { + netdev_err(ndev, "No irq resource\n"); + ret = ndev->irq; + goto err_irq; + } + + ret = request_irq(ndev->irq, xgmac_interrupt, 0, + dev_name(&pdev->dev), ndev); + if (ret < 0) { + netdev_err(ndev, "Could not request irq %d - ret %d)\n", + ndev->irq, ret); + goto err_irq; + } + + priv->pmt_irq = platform_get_irq(pdev, 1); + if (priv->pmt_irq == -ENXIO) { + netdev_err(ndev, "No pmt irq resource\n"); + ret = priv->pmt_irq; + goto err_pmt_irq; + } + + ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, + dev_name(&pdev->dev), ndev); + if (ret < 0) { + netdev_err(ndev, "Could not request irq %d - ret %d)\n", + priv->pmt_irq, ret); + goto err_pmt_irq; + } + + device_set_wakeup_capable(&pdev->dev, 1); + if (device_can_wakeup(priv->device)) + priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + + ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; + if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + ndev->features |= ndev->hw_features; + ndev->priv_flags |= IFF_UNICAST_FLT; + + /* Get the MAC address */ + xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); + if (!is_valid_ether_addr(ndev->dev_addr)) + netdev_warn(ndev, "MAC address %pM not valid", + ndev->dev_addr); + + netif_napi_add(ndev, &priv->napi, xgmac_poll, 64); + ret = register_netdev(ndev); + if (ret) + goto err_reg; + + return 0; + +err_reg: + netif_napi_del(&priv->napi); + free_irq(priv->pmt_irq, ndev); +err_pmt_irq: + free_irq(ndev->irq, ndev); +err_irq: + iounmap(priv->base); +err_io: + free_netdev(ndev); +err_alloc: + release_mem_region(res->start, resource_size(res)); + platform_set_drvdata(pdev, NULL); + return ret; +} + +/** + * xgmac_dvr_remove + * @pdev: platform device pointer + * Description: this function resets the TX/RX processes, disables the MAC RX/TX + * changes the link status, releases the DMA descriptor rings, + * unregisters the MDIO bus and unmaps the allocated memory. 
+ */ +static int xgmac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct xgmac_priv *priv = netdev_priv(ndev); + struct resource *res; + + xgmac_mac_disable(priv->base); + + /* Free the IRQ lines */ + free_irq(ndev->irq, ndev); + free_irq(priv->pmt_irq, ndev); + + platform_set_drvdata(pdev, NULL); + unregister_netdev(ndev); + netif_napi_del(&priv->napi); + + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) +{ + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; + if (mode & WAKE_UCAST) + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; + + writel(pmt, ioaddr + XGMAC_PMT); +} + +static int xgmac_suspend(struct device *dev) +{ + struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); + struct xgmac_priv *priv = netdev_priv(ndev); + u32 value; + + if (!ndev || !netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + napi_disable(&priv->napi); + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + + if (device_may_wakeup(priv->device)) { + /* Stop TX/RX DMA Only */ + value = readl(priv->base + XGMAC_DMA_CONTROL); + value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); + writel(value, priv->base + XGMAC_DMA_CONTROL); + + xgmac_pmt(priv->base, priv->wolopts); + } else + xgmac_mac_disable(priv->base); + + return 0; +} + +static int xgmac_resume(struct device *dev) +{ + struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); + struct xgmac_priv *priv = netdev_priv(ndev); + void __iomem *ioaddr = priv->base; + + if (!netif_running(ndev)) + return 0; + + xgmac_pmt(ioaddr, 0); + + /* Enable the MAC and DMA */ + xgmac_mac_enable(ioaddr); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + + netif_device_attach(ndev); + napi_enable(&priv->napi); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); +#define XGMAC_PM_OPS (&xgmac_pm_ops) +#else +#define XGMAC_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct of_device_id xgmac_of_match[] = { + { .compatible = "calxeda,hb-xgmac", }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgmac_of_match); + +static struct platform_driver xgmac_driver = { + .driver = { + .name = "calxedaxgmac", + .of_match_table = xgmac_of_match, + }, + .probe = xgmac_probe, + .remove = xgmac_remove, + .driver.pm = XGMAC_PM_OPS, +}; + +module_platform_driver(xgmac_driver); + +MODULE_AUTHOR("Calxeda, Inc."); +MODULE_DESCRIPTION("Calxeda 10G XGMAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index ca26d97171b..1d17c92f2dd 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct adapter *adapter = dev->ml_priv; - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, pci_name(adapter->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); } static int get_sset_count(struct 
net_device *dev, int sset) @@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p) return 0; } -static u32 t1_fix_features(struct net_device *dev, u32 features) +static netdev_features_t t1_fix_features(struct net_device *dev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features) return features; } -static int t1_set_features(struct net_device *dev, u32 features) +static int t1_set_features(struct net_device *dev, netdev_features_t features) { - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; struct adapter *adapter = dev->ml_priv; if (changed & NETIF_F_HW_VLAN_RX) diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index f9b60230004..47a84359d4e 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr, /* * Enable/disable VLAN acceleration. */ -void t1_vlan_mode(struct adapter *adapter, u32 features) +void t1_vlan_mode(struct adapter *adapter, netdev_features_t features) { struct sge *sge = adapter->sge; diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h index e03980bcdd6..b9bf16b385f 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.h +++ b/drivers/net/ethernet/chelsio/cxgb/sge.h @@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie); int t1_poll(struct napi_struct *, int); netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev); -void t1_vlan_mode(struct adapter *adapter, u32 features); +void t1_vlan_mode(struct adapter *adapter, netdev_features_t features); void t1_sge_start(struct sge *); void t1_sge_stop(struct sge *); int t1_sge_intr_error_handler(struct sge *); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 4d15c8f99c3..857cc254cab 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) t3_get_tp_version(adapter, &tp_vers); spin_unlock(&adapter->stats_lock); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(adapter->pdev)); - if (!fw_vers) - strcpy(info->fw_version, "N/A"); - else { + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); + if (fw_vers) snprintf(info->fw_version, sizeof(info->fw_version), "%s %u.%u.%u TP %u.%u.%u", G_FW_VERSION_TYPE(fw_vers) ? 
"T" : "N", @@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) G_TP_VERSION_MAJOR(tp_vers), G_TP_VERSION_MINOR(tp_vers), G_TP_VERSION_MICRO(tp_vers)); - } } static void get_strings(struct net_device *dev, u32 stringset, u8 * data) @@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) } } -static void cxgb_vlan_mode(struct net_device *dev, u32 features) +static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features) t3_synchronize_rx(adapter, pi); } -static u32 cxgb_fix_features(struct net_device *dev, u32 features) +static netdev_features_t cxgb_fix_features(struct net_device *dev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features) return features; } -static int cxgb_set_features(struct net_device *dev, u32 features) +static int cxgb_set_features(struct net_device *dev, netdev_features_t features) { - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) cxgb_vlan_mode(dev, features); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 90ff1318cc0..65e4b280619 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event, case (NETEVENT_REDIRECT):{ struct netevent_redirect *nr = ctx; cxgb_redirect(nr->old, nr->new); - cxgb_neigh_update(dst_get_neighbour(nr->new)); + cxgb_neigh_update(dst_get_neighbour_noref(nr->new)); break; } default: @@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev) static void cxgb_neigh_update(struct neighbour *neigh) { - struct net_device *dev = neigh->dev; + struct net_device *dev; + if (!neigh) + return; + dev = neigh->dev; if (dev && (is_offloading(dev))) { struct t3cdev *tdev = dev2t3cdev(dev); @@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) { struct net_device *olddev, *newdev; + struct neighbour *n; struct tid_info *ti; struct t3cdev *tdev; u32 tid; @@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) struct l2t_entry *e; struct t3c_tid_entry *te; - olddev = dst_get_neighbour(old)->dev; - newdev = dst_get_neighbour(new)->dev; + n = dst_get_neighbour_noref(old); + if (!n) + return; + olddev = n->dev; + + n = dst_get_neighbour_noref(new); + if (!n) + return; + newdev = n->dev; + if (!is_offloading(olddev)) return; if (!is_offloading(newdev)) { @@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) } /* Add new L2T entry */ - e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev); + e = t3_l2t_get(tdev, new, newdev); if (!e) { printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", __func__); @@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter) out_free_l2t: t3_free_l2t(L2DATA(dev)); - rcu_assign_pointer(dev->l2opt, NULL); + RCU_INIT_POINTER(dev->l2opt, NULL); out_free: kfree(t); return err; @@ -1329,7 +1341,7 @@ void 
cxgb3_offload_deactivate(struct adapter *adapter) rcu_read_lock(); d = L2DATA(tdev); rcu_read_unlock(); - rcu_assign_pointer(tdev->l2opt, NULL); + RCU_INIT_POINTER(tdev->l2opt, NULL); call_rcu(&d->rcu_head, clean_l2_data); if (t->nofail_skb) kfree_skb(t->nofail_skb); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 70fec8b1140..3fa3c8833ed 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) spin_unlock(&e->lock); } -struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, struct net_device *dev) { struct l2t_entry *e = NULL; + struct neighbour *neigh; + struct port_info *p; struct l2t_data *d; int hash; - u32 addr = *(u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - struct port_info *p = netdev_priv(dev); - int smt_idx = p->port_id; + u32 addr; + int ifidx; + int smt_idx; rcu_read_lock(); + neigh = dst_get_neighbour_noref(dst); + if (!neigh) + goto done_rcu; + + addr = *(u32 *) neigh->primary_key; + ifidx = neigh->dev->ifindex; + + if (!dev) + dev = neigh->dev; + p = netdev_priv(dev); + smt_idx = p->port_id; + d = L2DATA(cdev); if (!d) goto done_rcu; @@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, l2t_hold(d, e); if (atomic_read(&e->refcnt) == 1) reuse_entry(e, neigh); - goto done; + goto done_unlock; } /* Need to allocate a new entry */ @@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, e->vlan = VLAN_NONE; spin_unlock(&e->lock); } -done: +done_unlock: write_unlock_bh(&d->lock); done_rcu: rcu_read_unlock(); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index c5f54796e2c..c4e86436975 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); -struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, struct net_device *dev); int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, struct l2t_entry *e); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4c8f42afa3c..e83d12c7bf2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644); MODULE_PARM_DESC(intr_cnt, "thresholds 1..3 for queue interrupt packet counters"); -static int vf_acls; +static bool vf_acls; #ifdef CONFIG_PCI_IOV module_param(vf_acls, bool, 0644); @@ -1002,13 +1002,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct adapter *adapter = netdev2adap(dev); - strcpy(info->driver, KBUILD_MODNAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(adapter->pdev)); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); - if (!adapter->params.fw_vers) - 
strcpy(info->fw_version, "N/A"); - else + if (adapter->params.fw_vers) snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), @@ -1855,10 +1854,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return err; } -static int cxgb_set_features(struct net_device *dev, u32 features) +static int cxgb_set_features(struct net_device *dev, netdev_features_t features) { const struct port_info *pi = netdev_priv(dev); - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; int err; if (!(changed & NETIF_F_HW_VLAN_RX)) @@ -1872,30 +1871,30 @@ static int cxgb_set_features(struct net_device *dev, u32 features) return err; } -static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) +static u32 get_rss_table_size(struct net_device *dev) +{ + const struct port_info *pi = netdev_priv(dev); + + return pi->rss_size; +} + +static int get_rss_table(struct net_device *dev, u32 *p) { const struct port_info *pi = netdev_priv(dev); - unsigned int n = min_t(unsigned int, p->size, pi->rss_size); + unsigned int n = pi->rss_size; - p->size = pi->rss_size; while (n--) - p->ring_index[n] = pi->rss[n]; + p[n] = pi->rss[n]; return 0; } -static int set_rss_table(struct net_device *dev, - const struct ethtool_rxfh_indir *p) +static int set_rss_table(struct net_device *dev, const u32 *p) { unsigned int i; struct port_info *pi = netdev_priv(dev); - if (p->size != pi->rss_size) - return -EINVAL; - for (i = 0; i < p->size; i++) - if (p->ring_index[i] >= pi->nqsets) - return -EINVAL; - for (i = 0; i < p->size; i++) - pi->rss[i] = p->ring_index[i]; + for (i = 0; i < pi->rss_size; i++) + pi->rss[i] = p[i]; if (pi->adapter->flags & FULL_INIT_DONE) return write_rss(pi, pi->rss); return 0; @@ -1964,7 +1963,7 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return -EOPNOTSUPP; } -static struct ethtool_ops cxgb_ethtool_ops = { +static const struct ethtool_ops cxgb_ethtool_ops = { .get_settings = get_settings, .set_settings = set_settings, .get_drvinfo = get_drvinfo, @@ -1990,6 +1989,7 @@ static struct ethtool_ops cxgb_ethtool_ops = { .get_wol = get_wol, .set_wol = set_wol, .get_rxnfc = get_rxnfc, + .get_rxfh_indir_size = get_rss_table_size, .get_rxfh_indir = get_rss_table, .set_rxfh_indir = set_rss_table, .flash_device = set_flash, @@ -3449,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap) if (!pi->rss) return -ENOMEM; for (j = 0; j < pi->rss_size; j++) - pi->rss[j] = j % pi->nqsets; + pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets); } return 0; } @@ -3537,7 +3537,7 @@ static int __devinit init_one(struct pci_dev *pdev, { int func, i, err; struct port_info *pi; - unsigned int highdma = 0; + bool highdma = false; struct adapter *adapter = NULL; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -3563,7 +3563,7 @@ static int __devinit init_one(struct pci_dev *pdev, } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - highdma = NETIF_F_HIGHDMA; + highdma = true; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " @@ -3637,7 +3637,9 @@ static int __devinit init_one(struct pci_dev *pdev, NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - netdev->features |= netdev->hw_features | highdma; + if (highdma) + netdev->hw_features |= NETIF_F_HIGHDMA; + netdev->features |= 
netdev->hw_features; netdev->vlan_features = netdev->features & VLAN_FEAT; netdev->priv_flags |= IFF_UNICAST_FLT; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 140254c7cba..2dae7959f00 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - gfp |= __GFP_NOWARN; /* failures are expected */ + gfp |= __GFP_NOWARN | __GFP_COLD; #if FL_PG_ORDER > 0 /* @@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, #endif while (n--) { - pg = __netdev_alloc_page(adap->port[0], gfp); + pg = alloc_page(gfp); if (unlikely(!pg)) { q->alloc_failed++; break; @@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { - netdev_free_page(adap->port[0], pg); + put_page(pg); goto out; } *d++ = cpu_to_be64(mapping); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index da9072bfca8..5ca73671830 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu) return ret; } -static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features) +static netdev_features_t cxgb4vf_fix_features(struct net_device *dev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features) return features; } -static int cxgb4vf_set_features(struct net_device *dev, u32 features) +static int cxgb4vf_set_features(struct net_device *dev, + netdev_features_t features) { struct port_info *pi = netdev_priv(dev); - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, @@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev, { struct adapter *adapter = netdev2adap(dev); - strcpy(drvinfo->driver, KBUILD_MODNAME); - strcpy(drvinfo->version, DRV_VERSION); - strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent))); + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)), + sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), @@ -1561,7 +1564,7 @@ static void cxgb4vf_get_wol(struct net_device *dev, */ #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) -static struct ethtool_ops cxgb4vf_ethtool_ops = { +static const struct ethtool_ops cxgb4vf_ethtool_ops = { .get_settings = cxgb4vf_get_settings, .get_drvinfo = cxgb4vf_get_drvinfo, .get_msglevel = cxgb4vf_get_msglevel, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8d5d55ad102..c381db23e71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = __netdev_alloc_page(adapter->port[0], - gfp | __GFP_NOWARN); + page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD); if (unlikely(!page)) { fl->alloc_failed++; break; @@ -664,7 +663,7 @@ alloc_small_pages: dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { - netdev_free_page(adapter->port[0], page); + put_page(page); break; } *d++ = cpu_to_be64(dma_addr); diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c index fd6247b3c0e..bf0fc56dba1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_dev.c +++ b/drivers/net/ethernet/cisco/enic/enic_dev.c @@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status) } /* rtnl lock is held */ -void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); + int err; spin_lock(&enic->devcmd_lock); - enic_add_vlan(enic, vid); + err = enic_add_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); + + return err; } /* rtnl lock is held */ -void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); + int err; spin_lock(&enic->devcmd_lock); - enic_del_vlan(enic, vid); + err = enic_del_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); + + return err; } int enic_dev_enable2(struct enic *enic, int active) diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h index 1f83a4747ba..da1cba3c410 100644 --- a/drivers/net/ethernet/cisco/enic/enic_dev.h +++ b/drivers/net/ethernet/cisco/enic/enic_dev.h @@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, int broadcast, int promisc, int allmulti); int enic_dev_add_addr(struct enic *enic, u8 *addr); int enic_dev_del_addr(struct enic *enic, u8 *addr); -void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); int enic_dev_notify_unset(struct enic *enic); int enic_dev_hang_notify(struct enic *enic); int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c3786fda11d..2fd9db4b1be 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev, enic_dev_fw_info(enic, &fw_info); - strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - strncpy(drvinfo->fw_version, fw_info->fw_version, + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, fw_info->fw_version, sizeof(drvinfo->fw_version)); - strncpy(drvinfo->bus_info, pci_name(enic->pdev), + strlcpy(drvinfo->bus_info, pci_name(enic->pdev), sizeof(drvinfo->bus_info)); } @@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, #endif /* 
Allocate structure for port profiles */ - enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL); + enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); if (!enic->pp) { pr_err("port profile alloc failed, aborting\n"); err = -ENOMEM; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 2a22f525635..f801754c71a 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev) return mii_nway_restart(&dm->mii); } -static int dm9000_set_features(struct net_device *dev, u32 features) +static int dm9000_set_features(struct net_device *dev, + netdev_features_t features) { board_info_t *dm = to_dm9000_board(dev); - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; unsigned long flags; if (!(changed & NETIF_F_RXCSUM)) diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 1427739d9a5..1eb46a0bb48 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) { struct de_private *de = netdev_priv(dev); - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - strcpy (info->bus_info, pci_name(de->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info)); info->eedump_len = DE_EEPROM_SIZE; } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 871bcaa7068..4d71f5ae20c 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) u_long iobase = 0; /* Clear upper 32 bits in Alphas */ int i, j; struct de4x5_private *lp = netdev_priv(dev); - struct list_head *walk; - - list_for_each(walk, &pdev->bus_list) { - struct pci_dev *this_dev = pci_dev_b(walk); - - /* Skip the pci_bus list entry */ - if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue; + struct pci_dev *this_dev; + list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) { vendor = this_dev->vendor; device = this_dev->device << 8; if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue; @@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev) struct de4x5_private *lp = netdev_priv(dev); char *p, *q, t; - lp->params.fdx = 0; + lp->params.fdx = false; lp->params.autosense = AUTO; if (args == NULL) return; @@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev) t = *q; *q = '\0'; - if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1; + if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true; if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { if (strstr(p, "TP")) { diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 17b11ee1745..51f7542eb45 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev, { struct dmfe_board_info *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, 
sizeof(info->version)); if (np->pdev) - strcpy(info->bus_info, pci_name(np->pdev)); + strlcpy(info->bus_info, pci_name(np->pdev), + sizeof(info->bus_info)); else sprintf(info->bus_info, "EISA 0x%lx %d", dev->base_addr, dev->irq); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 9656dd0647d..4eb0d76145c 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev) static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tulip_private *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(np->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 7a44a7a6adc..48b0b6566ee 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev, { struct uli526x_board_info *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); if (np->pdev) - strcpy(info->bus_info, pci_name(np->pdev)); + strlcpy(info->bus_info, pci_name(np->pdev), + sizeof(info->bus_info)); else sprintf(info->bus_info, "EISA 0x%lx %d", dev->base_addr, dev->irq); diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 4d01219ba22..52da7b2fe3b 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * { struct netdev_private *np = netdev_priv(dev); - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - strcpy (info->bus_info, pci_name(np->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c index 23a65398d01..c24fab1e9cb 100644 --- a/drivers/net/ethernet/dlink/de600.c +++ b/drivers/net/ethernet/dlink/de600.c @@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj #include "de600.h" -static unsigned int check_lost = 1; +static bool check_lost = true; module_param(check_lost, bool, 0); MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600"); diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index dcd7f7a71ad..28a3a9b50b8 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - 
strcpy(info->bus_info, pci_name(np->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index c1063d1540c..ce88c0f399f 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -804,9 +804,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static void dnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, "0"); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, "0", sizeof(info->bus_info)); } static const struct ethtool_ops dnet_ethtool_ops = { @@ -977,18 +977,7 @@ static struct platform_driver dnet_driver = { }, }; -static int __init dnet_init(void) -{ - return platform_driver_register(&dnet_driver); -} - -static void __exit dnet_exit(void) -{ - platform_driver_unregister(&dnet_driver); -} - -module_init(dnet_init); -module_exit(dnet_exit); +module_platform_driver(dnet_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Dave DNET Ethernet driver"); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 644e8fed836..cbdec2536da 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -40,6 +40,7 @@ #define OC_NAME "Emulex OneConnect 10Gbps NIC" #define OC_NAME_BE OC_NAME "(be3)" #define OC_NAME_LANCER OC_NAME "(Lancer)" +#define OC_NAME_SH OC_NAME "(Skyhawk)" #define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" #define BE_VENDOR_ID 0x19a2 @@ -50,6 +51,7 @@ #define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */ #define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ #define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */ +#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */ static inline char *nic_name(struct pci_dev *pdev) { @@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev) return OC_NAME_LANCER; case BE_DEVICE_ID2: return BE3_NAME; + case OC_DEVICE_ID5: + return OC_NAME_SH; default: return BE_NAME; } @@ -288,14 +292,14 @@ struct be_drv_stats { }; struct be_vf_cfg { - unsigned char vf_mac_addr[ETH_ALEN]; - u32 vf_if_handle; - u32 vf_pmac_id; - u16 vf_vlan_tag; - u32 vf_tx_rate; + unsigned char mac_addr[ETH_ALEN]; + int if_handle; + int pmac_id; + u16 vlan_tag; + u32 tx_rate; }; -#define BE_INVALID_PMAC_ID 0xffffffff +#define BE_FLAGS_LINK_STATUS_INIT 1 struct be_adapter { struct pci_dev *pdev; @@ -345,13 +349,16 @@ struct be_adapter { struct delayed_work work; u16 work_counter; + u32 flags; /* Ethtool knobs and info */ char fw_ver[FW_VER_LEN]; - u32 if_handle; /* Used to configure filtering */ + int if_handle; /* Used to configure filtering */ u32 pmac_id; /* MAC addr handle used by BE card */ u32 beacon_state; /* for set_phys_id */ bool eeh_err; + bool ue_detected; + bool fw_timeout; u32 port_num; bool promiscuous; bool wol; @@ -359,7 +366,6 @@ struct be_adapter { u32 function_caps; u32 rx_fc; /* Rx flow control */ u32 tx_fc; /* Tx flow control */ - bool ue_detected; bool stats_cmd_sent; int link_speed; u8 port_type; @@ -369,16 +375,20 @@ struct be_adapter { u32 flash_status; struct completion flash_compl; 
- bool be3_native; - bool sriov_enabled; - struct be_vf_cfg *vf_cfg; + u32 num_vfs; u8 is_virtfn; + struct be_vf_cfg *vf_cfg; + bool be3_native; u32 sli_family; u8 hba_port_num; u16 pvid; }; #define be_physfn(adapter) (!adapter->is_virtfn) +#define sriov_enabled(adapter) (adapter->num_vfs > 0) +#define for_all_vfs(adapter, vf_cfg, i) \ + for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ + i++, vf_cfg++) /* BladeEngine Generation numbers */ #define BE_GEN2 2 @@ -524,9 +534,14 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter) return adapter->num_rx_qs > 1; } +static inline bool be_error(struct be_adapter *adapter) +{ + return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout; +} + extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped); -extern void be_link_status_update(struct be_adapter *adapter, u32 link_status); +extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); extern void be_parse_stats(struct be_adapter *adapter); extern int be_load_fw(struct be_adapter *adapter, u8 *func); #endif /* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 2c7b36673df..0fcb4562479 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter) struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; - if (adapter->eeh_err) { - dev_info(&adapter->pdev->dev, - "Error in Card Detected! Cannot issue commands\n"); + if (be_error(adapter)) return; - } val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; @@ -128,7 +125,14 @@ done: static void be_async_link_state_process(struct be_adapter *adapter, struct be_async_event_link_state *evt) { - be_link_status_update(adapter, evt->port_link_status); + /* When link status changes, link speed must be re-queried from FW */ + adapter->link_speed = -1; + + /* For the initial link status do not rely on the ASYNC event as + * it may not be received in some cases. 
+ */ + if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) + be_link_status_update(adapter, evt->port_link_status); } /* Grp5 CoS Priority evt */ @@ -266,10 +270,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) int i, num, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - if (adapter->eeh_err) - return -EIO; - for (i = 0; i < mcc_timeout; i++) { + if (be_error(adapter)) + return -EIO; + num = be_process_mcc(adapter, &status); if (num) be_cq_notify(adapter, mcc_obj->cq.id, @@ -280,7 +284,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) udelay(100); } if (i == mcc_timeout) { - dev_err(&adapter->pdev->dev, "mccq poll timed out\n"); + dev_err(&adapter->pdev->dev, "FW not responding\n"); + adapter->fw_timeout = true; return -1; } return status; @@ -298,26 +303,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) int msecs = 0; u32 ready; - if (adapter->eeh_err) { - dev_err(&adapter->pdev->dev, - "Error detected in card.Cannot issue commands\n"); - return -EIO; - } - do { + if (be_error(adapter)) + return -EIO; + ready = ioread32(db); - if (ready == 0xffffffff) { - dev_err(&adapter->pdev->dev, - "pci slot disconnected\n"); + if (ready == 0xffffffff) return -1; - } ready &= MPU_MAILBOX_DB_RDY_MASK; if (ready) break; if (msecs > 4000) { - dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); + dev_err(&adapter->pdev->dev, "FW not responding\n"); + adapter->fw_timeout = true; be_detect_dump_ue(adapter); return -1; } @@ -555,9 +555,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter) u8 *wrb; int status; - if (adapter->eeh_err) - return -EIO; - if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -619,7 +616,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, /* Use MCC */ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, - u8 type, bool permanent, u32 if_handle) + u8 type, bool permanent, u32 if_handle, u32 pmac_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_mac_query *req; @@ -641,6 +638,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, req->permanent = 1; } else { req->if_id = cpu_to_le16((u16) if_handle); + req->pmac_id = cpu_to_le32(pmac_id); req->permanent = 0; } @@ -695,12 +693,15 @@ err: } /* Uses synchronous MCCQ */ -int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom) +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_del *req; int status; + if (pmac_id == -1) + return 0; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -923,10 +924,14 @@ int be_cmd_txq_create(struct be_adapter *adapter, void *ctxt; int status; - if (mutex_lock_interruptible(&adapter->mbox_lock)) - return -1; + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } - wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); ctxt = &req->context; @@ -952,14 +957,15 @@ int be_cmd_txq_create(struct be_adapter *adapter, be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); - status = be_mbox_notify_wait(adapter); + status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); txq->id = le16_to_cpu(resp->cid); txq->created = true; } - mutex_unlock(&adapter->mbox_lock); +err: + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1018,9 +1024,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, u8 subsys = 0, opcode = 0; 
int status; - if (adapter->eeh_err) - return -EIO; - if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -1136,16 +1139,13 @@ err: } /* Uses MCCQ */ -int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain) +int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_destroy *req; int status; - if (adapter->eeh_err) - return -EIO; - - if (!interface_id) + if (interface_id == -1) return 0; spin_lock_bh(&adapter->mcc_lock); @@ -1239,7 +1239,7 @@ err: /* Uses synchronous mcc */ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, - u16 *link_speed, u32 dom) + u16 *link_speed, u8 *link_status, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_link_status *req; @@ -1247,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, spin_lock_bh(&adapter->mcc_lock); + if (link_status) + *link_status = LINK_DOWN; + wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; @@ -1254,6 +1257,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, } req = embedded_payload(wrb); + if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) + req->hdr.version = 1; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); @@ -1261,10 +1267,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { - *link_speed = le16_to_cpu(resp->link_speed); + if (link_speed) + *link_speed = le16_to_cpu(resp->link_speed); if (mac_speed) *mac_speed = resp->mac_speed; } + if (link_status) + *link_status = resp->logical_link_status; } err: @@ -1673,8 +1682,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_rss_config *req; - u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF, - 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF}; + u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, + 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, + 0x3ea83c02, 0x4a110304}; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) @@ -1836,6 +1846,53 @@ err_unlock: return status; } +int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, const char *obj_name, + u32 *data_read, u32 *eof, u8 *addn_status) +{ + struct be_mcc_wrb *wrb; + struct lancer_cmd_req_read_object *req; + struct lancer_cmd_resp_read_object *resp; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err_unlock; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_READ_OBJECT, + sizeof(struct lancer_cmd_req_read_object), wrb, + NULL); + + req->desired_read_len = cpu_to_le32(data_size); + req->read_offset = cpu_to_le32(data_offset); + strcpy(req->object_name, obj_name); + req->descriptor_count = cpu_to_le32(1); + req->buf_len = cpu_to_le32(data_size); + req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); + req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); + + status = be_mcc_notify_wait(adapter); + + resp = embedded_payload(wrb); + if (!status) { + *data_read = le32_to_cpu(resp->actual_read_len); + *eof = le32_to_cpu(resp->eof); + } else { + *addn_status = resp->additional_status; + } + +err_unlock: + 
spin_unlock_bh(&adapter->mcc_lock); + return status; +} + int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 flash_type, u32 flash_opcode, u32 buf_size) { @@ -2238,3 +2295,99 @@ err: mutex_unlock(&adapter->mbox_lock); return status; } + +/* Uses synchronous MCCQ */ +int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, + u32 *pmac_id) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_mac_list *req; + int status; + int mac_count; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), + wrb, NULL); + + req->hdr.domain = domain; + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_mac_list *resp = + embedded_payload(wrb); + int i; + u8 *ctxt = &resp->context[0][0]; + status = -EIO; + mac_count = resp->mac_count; + be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); + for (i = 0; i < mac_count; i++) { + if (!AMAP_GET_BITS(struct amap_get_mac_list_context, + act, ctxt)) { + *pmac_id = AMAP_GET_BITS + (struct amap_get_mac_list_context, + macid, ctxt); + status = 0; + break; + } + ctxt += sizeof(struct amap_get_mac_list_context) / 8; + } + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses synchronous MCCQ */ +int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, + u8 mac_count, u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_mac_list *req; + int status; + struct be_dma_mem cmd; + + memset(&cmd, 0, sizeof(struct be_dma_mem)); + cmd.size = sizeof(struct be_cmd_req_set_mac_list); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, + &cmd.dma, GFP_KERNEL); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); + return -ENOMEM; + } + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd.va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), + wrb, &cmd); + + req->hdr.domain = domain; + req->mac_count = mac_count; + if (mac_count) + memcpy(req->mac, mac_array, ETH_ALEN*mac_count); + + status = be_mcc_notify_wait(adapter); + +err: + dma_free_coherent(&adapter->pdev->dev, cmd.size, + cmd.va, cmd.dma); + spin_unlock_bh(&adapter->mcc_lock); + return status; +} diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index a35cd03fac4..dca89249088 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -189,6 +189,9 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_PHY_DETAILS 102 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 +#define OPCODE_COMMON_GET_MAC_LIST 147 +#define OPCODE_COMMON_SET_MAC_LIST 148 +#define OPCODE_COMMON_READ_OBJECT 171 #define OPCODE_COMMON_WRITE_OBJECT 172 #define OPCODE_ETH_RSS_CONFIG 1 @@ -294,6 +297,7 @@ struct be_cmd_req_mac_query { u8 type; u8 permanent; u16 if_id; + u32 pmac_id; } __packed; struct be_cmd_resp_mac_query { @@ -956,7 +960,8 @@ struct be_cmd_resp_link_status { u8 mgmt_mac_duplex; u8 mgmt_mac_speed; u16 link_speed; - u32 rsvd0; + u8 logical_link_status; + u8 rsvd1[3]; } __packed; /******************** Port Identification ***************************/ @@ -1161,6 +1166,38 @@ struct lancer_cmd_resp_write_object { 
u32 actual_write_len; }; +/************************ Lancer Read FW info **************/ +#define LANCER_READ_FILE_CHUNK (32*1024) +#define LANCER_READ_FILE_EOF_MASK 0x80000000 + +#define LANCER_FW_DUMP_FILE "/dbg/dump.bin" +#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd" +#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd" + +struct lancer_cmd_req_read_object { + struct be_cmd_req_hdr hdr; + u32 desired_read_len; + u32 read_offset; + u8 object_name[104]; + u32 descriptor_count; + u32 buf_len; + u32 addr_low; + u32 addr_high; +}; + +struct lancer_cmd_resp_read_object { + u8 opcode; + u8 subsystem; + u8 rsvd1[2]; + u8 status; + u8 additional_status; + u8 rsvd2[2]; + u32 resp_len; + u32 actual_resp_len; + u32 actual_read_len; + u32 eof; +}; + /************************ WOL *******************************/ struct be_cmd_req_acpi_wol_magic_config{ struct be_cmd_req_hdr hdr; @@ -1307,6 +1344,34 @@ struct be_cmd_resp_set_func_cap { u8 rsvd[212]; }; +/******************** GET/SET_MACLIST **************************/ +#define BE_MAX_MAC 64 +struct amap_get_mac_list_context { + u8 macid[31]; + u8 act; +} __packed; + +struct be_cmd_req_get_mac_list { + struct be_cmd_req_hdr hdr; + u32 rsvd; +} __packed; + +struct be_cmd_resp_get_mac_list { + struct be_cmd_resp_hdr hdr; + u8 mac_count; + u8 rsvd1; + u16 rsvd2; + u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC]; +} __packed; + +struct be_cmd_req_set_mac_list { + struct be_cmd_req_hdr hdr; + u8 mac_count; + u8 rsvd1; + u16 rsvd2; + struct macaddr mac[BE_MAX_MAC]; +} __packed; + /*************** HW Stats Get v1 **********************************/ #define BE_TXP_SW_SZ 48 struct be_port_rxf_stats_v1 { @@ -1413,15 +1478,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter) extern int be_pci_fnum_get(struct be_adapter *adapter); extern int be_cmd_POST(struct be_adapter *adapter); extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, - u8 type, bool permanent, u32 if_handle); + u8 type, bool permanent, u32 if_handle, u32 pmac_id); extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain); extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, - u32 pmac_id, u32 domain); + int pmac_id, u32 domain); extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain); -extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle, +extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain); extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay); @@ -1443,8 +1508,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, int type); extern int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q); -extern int be_cmd_link_status_query(struct be_adapter *adapter, - u8 *mac_speed, u16 *link_speed, u32 dom); +extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, + u16 *link_speed, u8 *link_status, u32 dom); extern int be_cmd_reset(struct be_adapter *adapter); extern int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); @@ -1480,6 +1545,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_written, u8 *addn_status); +int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, const char *obj_name, 
+ u32 *data_read, u32 *eof, u8 *addn_status); int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, int offset); extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, @@ -1506,4 +1574,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); extern int be_cmd_req_native_mode(struct be_adapter *adapter); extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); +extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, + u32 *pmac_id); +extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, + u8 mac_count, u32 domain); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index bf8153ea4ed..6db6b6ae5e9 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev, memset(fw_on_flash, 0 , sizeof(fw_on_flash)); be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash); - strcpy(drvinfo->driver, DRV_NAME); - strcpy(drvinfo->version, DRV_VER); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN); if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) { strcat(drvinfo->fw_version, " ["); @@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev, strcat(drvinfo->fw_version, "]"); } - strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } +static u32 +lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) +{ + u32 data_read = 0, eof; + u8 addn_status; + struct be_dma_mem data_len_cmd; + int status; + + memset(&data_len_cmd, 0, sizeof(data_len_cmd)); + /* data_offset and data_size should be 0 to get reg len */ + status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, + file_name, &data_read, &eof, &addn_status); + + return data_read; +} + +static int +lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, + u32 buf_len, void *buf) +{ + struct be_dma_mem read_cmd; + u32 read_len = 0, total_read_len = 0, chunk_size; + u32 eof = 0; + u8 addn_status; + int status = 0; + + read_cmd.size = LANCER_READ_FILE_CHUNK; + read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, + &read_cmd.dma); + + if (!read_cmd.va) { + dev_err(&adapter->pdev->dev, + "Memory allocation failure while reading dump\n"); + return -ENOMEM; + } + + while ((total_read_len < buf_len) && !eof) { + chunk_size = min_t(u32, (buf_len - total_read_len), + LANCER_READ_FILE_CHUNK); + chunk_size = ALIGN(chunk_size, 4); + status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, + total_read_len, file_name, &read_len, + &eof, &addn_status); + if (!status) { + memcpy(buf + total_read_len, read_cmd.va, read_len); + total_read_len += read_len; + eof &= LANCER_READ_FILE_EOF_MASK; + } else { + status = -EIO; + break; + } + } + pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, + read_cmd.dma); + + return status; +} + static int be_get_reg_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); u32 log_size = 0; - if (be_physfn(adapter)) - be_cmd_get_reg_len(adapter, &log_size); - + if (be_physfn(adapter)) { + 
if (lancer_chip(adapter)) + log_size = lancer_cmd_get_file_len(adapter, + LANCER_FW_DUMP_FILE); + else + be_cmd_get_reg_len(adapter, &log_size); + } return log_size; } @@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) if (be_physfn(adapter)) { memset(buf, 0, regs->len); - be_cmd_get_regs(adapter, regs->len, buf); + if (lancer_chip(adapter)) + lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, + regs->len, buf); + else + be_cmd_get_regs(adapter, regs->len, buf); } } @@ -362,11 +429,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct be_phy_info phy_info; u8 mac_speed = 0; u16 link_speed = 0; + u8 link_status; int status; if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { status = be_cmd_link_status_query(adapter, &mac_speed, - &link_speed, 0); + &link_speed, &link_status, 0); + if (!status) + be_link_status_update(adapter, link_status); /* link_speed is in units of 10 Mbps */ if (link_speed) { @@ -453,16 +523,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) return 0; } -static void -be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +static void be_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) { struct be_adapter *adapter = netdev_priv(netdev); - ring->rx_max_pending = adapter->rx_obj[0].q.len; - ring->tx_max_pending = adapter->tx_obj[0].q.len; - - ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used); - ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used); + ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len; + ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len; } static void @@ -636,7 +703,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) } if (be_cmd_link_status_query(adapter, &mac_speed, - &qos_link_speed, 0) != 0) { + &qos_link_speed, NULL, 0) != 0) { test->flags |= ETH_TEST_FL_FAILED; data[4] = -1; } else if (!mac_speed) { @@ -660,7 +727,17 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) static int be_get_eeprom_len(struct net_device *netdev) { - return BE_READ_SEEPROM_LEN; + struct be_adapter *adapter = netdev_priv(netdev); + if (lancer_chip(adapter)) { + if (be_physfn(adapter)) + return lancer_cmd_get_file_len(adapter, + LANCER_VPD_PF_FILE); + else + return lancer_cmd_get_file_len(adapter, + LANCER_VPD_VF_FILE); + } else { + return BE_READ_SEEPROM_LEN; + } } static int @@ -675,6 +752,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, if (!eeprom->len) return -EINVAL; + if (lancer_chip(adapter)) { + if (be_physfn(adapter)) + return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, + eeprom->len, data); + else + return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, + eeprom->len, data); + } + eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index bf266a00c77..6c46753aeb4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("ServerEngines Corporation"); MODULE_LICENSE("GPL"); -static ushort rx_frag_size = 2048; static unsigned int num_vfs; -module_param(rx_frag_size, ushort, S_IRUGO); module_param(num_vfs, uint, S_IRUGO); -MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that 
holds rcvd data."); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); +static ushort rx_frag_size = 2048; +module_param(rx_frag_size, ushort, S_IRUGO); +MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); + static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, @@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -237,7 +239,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; status = be_cmd_mac_addr_query(adapter, current_mac, - MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); + MAC_ADDRESS_TYPE_NETWORK, false, + adapter->if_handle, 0); if (status) goto err; @@ -315,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter) struct be_drv_stats *drvs = &adapter->drv_stats; be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); + drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; + drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; drvs->rx_pause_frames = port_stats->rx_pause_frames; drvs->rx_crc_errors = port_stats->rx_crc_errors; drvs->rx_control_frames = port_stats->rx_control_frames; @@ -491,19 +496,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, return stats; } -void be_link_status_update(struct be_adapter *adapter, u32 link_status) +void be_link_status_update(struct be_adapter *adapter, u8 link_status) { struct net_device *netdev = adapter->netdev; - /* when link status changes, link speed must be re-queried from card */ - adapter->link_speed = -1; - if ((link_status & LINK_STATUS_MASK) == LINK_UP) { - netif_carrier_on(netdev); - dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name); - } else { + if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) { netif_carrier_off(netdev); - dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name); + adapter->flags |= BE_FLAGS_LINK_STATUS_INIT; } + + if ((link_status & LINK_STATUS_MASK) == LINK_UP) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); } static void be_tx_stats_update(struct be_tx_obj *txo, @@ -549,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; } +static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, + struct sk_buff *skb) +{ + u8 vlan_prio; + u16 vlan_tag; + + vlan_tag = vlan_tx_tag_get(skb); + vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + /* If vlan priority provided by OS is NOT in available bmap */ + if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) + vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | + adapter->recommended_prio; + + return vlan_tag; +} + static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, u32 wrb_cnt, u32 len) { - u8 vlan_prio = 0; - u16 vlan_tag = 0; + u16 vlan_tag; memset(hdr, 0, sizeof(*hdr)); @@ -584,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, if (vlan_tx_tag_present(skb)) { AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); - vlan_tag = vlan_tx_tag_get(skb); - vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; - /* If vlan priority provided by OS is NOT in available bmap */ - if 
(!(adapter->vlan_prio_bmap & (1 << vlan_prio))) - vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | - adapter->recommended_prio; + vlan_tag = be_get_tx_vlan_tag(adapter, skb); AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag); } @@ -692,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, u32 start = txq->head; bool dummy_wrb, stopped = false; + /* For vlan tagged pkts, BE + * 1) calculates checksum even when CSO is not requested + * 2) calculates checksum wrongly for padded pkt less than + * 60 bytes long. + * As a workaround disable TX vlan offloading in such cases. + */ + if (unlikely(vlan_tx_tag_present(skb) && + (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + goto tx_drop; + + skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb)); + if (unlikely(!skb)) + goto tx_drop; + + skb->vlan_tci = 0; + } + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); @@ -719,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, txq->head = start; dev_kfree_skb_any(skb); } +tx_drop: return NETDEV_TX_OK; } @@ -746,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) */ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) { + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num]; u16 vtag[BE_NUM_VLANS_SUPPORTED]; u16 ntags = 0, i; int status = 0; - u32 if_handle; if (vf) { - if_handle = adapter->vf_cfg[vf_num].vf_if_handle; - vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag); - status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0); + vtag[0] = cpu_to_le16(vf_cfg->vlan_tag); + status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag, + 1, 1, 0); } /* No need to further configure vids if in promiscuous mode */ @@ -779,31 +814,48 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) return status; } -static void be_vlan_add_vid(struct net_device *netdev, u16 vid) +static int be_vlan_add_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; - adapter->vlans_added++; - if (!be_physfn(adapter)) - return; + if (!be_physfn(adapter)) { + status = -EINVAL; + goto ret; + } adapter->vlan_tag[vid] = 1; if (adapter->vlans_added <= (adapter->max_vlans + 1)) - be_vid_config(adapter, false, 0); + status = be_vid_config(adapter, false, 0); + + if (!status) + adapter->vlans_added++; + else + adapter->vlan_tag[vid] = 0; +ret: + return status; } -static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) +static int be_vlan_rem_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; - adapter->vlans_added--; - - if (!be_physfn(adapter)) - return; + if (!be_physfn(adapter)) { + status = -EINVAL; + goto ret; + } adapter->vlan_tag[vid] = 0; if (adapter->vlans_added <= adapter->max_vlans) - be_vid_config(adapter, false, 0); + status = be_vid_config(adapter, false, 0); + + if (!status) + adapter->vlans_added--; + else + adapter->vlan_tag[vid] = 1; +ret: + return status; } static void be_set_rx_mode(struct net_device *netdev) @@ -840,28 +892,30 @@ done: static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct be_adapter *adapter = netdev_priv(netdev); + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; int status; - if (!adapter->sriov_enabled) + if (!sriov_enabled(adapter)) return -EPERM; - if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) + if 
(!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) return -EINVAL; - if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) - status = be_cmd_pmac_del(adapter, - adapter->vf_cfg[vf].vf_if_handle, - adapter->vf_cfg[vf].vf_pmac_id, vf + 1); + if (lancer_chip(adapter)) { + status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); + } else { + status = be_cmd_pmac_del(adapter, vf_cfg->if_handle, + vf_cfg->pmac_id, vf + 1); - status = be_cmd_pmac_add(adapter, mac, - adapter->vf_cfg[vf].vf_if_handle, - &adapter->vf_cfg[vf].vf_pmac_id, vf + 1); + status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, + &vf_cfg->pmac_id, vf + 1); + } if (status) dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", mac, vf); else - memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN); + memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); return status; } @@ -870,18 +924,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *vi) { struct be_adapter *adapter = netdev_priv(netdev); + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; - if (!adapter->sriov_enabled) + if (!sriov_enabled(adapter)) return -EPERM; - if (vf >= num_vfs) + if (vf >= adapter->num_vfs) return -EINVAL; vi->vf = vf; - vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate; - vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag; + vi->tx_rate = vf_cfg->tx_rate; + vi->vlan = vf_cfg->vlan_tag; vi->qos = 0; - memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN); + memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); return 0; } @@ -892,17 +947,17 @@ static int be_set_vf_vlan(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status = 0; - if (!adapter->sriov_enabled) + if (!sriov_enabled(adapter)) return -EPERM; - if ((vf >= num_vfs) || (vlan > 4095)) + if (vf >= adapter->num_vfs || vlan > 4095) return -EINVAL; if (vlan) { - adapter->vf_cfg[vf].vf_vlan_tag = vlan; + adapter->vf_cfg[vf].vlan_tag = vlan; adapter->vlans_added++; } else { - adapter->vf_cfg[vf].vf_vlan_tag = 0; + adapter->vf_cfg[vf].vlan_tag = 0; adapter->vlans_added--; } @@ -920,21 +975,25 @@ static int be_set_vf_tx_rate(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status = 0; - if (!adapter->sriov_enabled) + if (!sriov_enabled(adapter)) return -EPERM; - if ((vf >= num_vfs) || (rate < 0)) + if (vf >= adapter->num_vfs) return -EINVAL; - if (rate > 10000) - rate = 10000; + if (rate < 100 || rate > 10000) { + dev_err(&adapter->pdev->dev, + "tx rate must be between 100 and 10000 Mbps\n"); + return -EINVAL; + } - adapter->vf_cfg[vf].vf_tx_rate = rate; status = be_cmd_set_qos(adapter, rate / 10, vf + 1); if (status) - dev_info(&adapter->pdev->dev, + dev_err(&adapter->pdev->dev, "tx rate %d on VF %d failed\n", rate, vf); + else + adapter->vf_cfg[vf].tx_rate = rate; return status; } @@ -1645,8 +1704,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) static int be_num_txqs_want(struct be_adapter *adapter) { - if ((num_vfs && adapter->sriov_enabled) || - be_is_mc(adapter) || + if (sriov_enabled(adapter) || be_is_mc(adapter) || lancer_chip(adapter) || !be_physfn(adapter) || adapter->generation == BE_GEN2) return 1; @@ -1662,9 +1720,12 @@ static int be_tx_queues_create(struct be_adapter *adapter) u8 i; adapter->num_tx_qs = be_num_txqs_want(adapter); - if (adapter->num_tx_qs != MAX_TX_QS) + if (adapter->num_tx_qs != MAX_TX_QS) { + rtnl_lock(); netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_qs); + rtnl_unlock(); + } adapter->tx_eq.max_eqd = 0; adapter->tx_eq.min_eqd = 0; @@ -1693,9 
+1754,6 @@ static int be_tx_queues_create(struct be_adapter *adapter) if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) goto err; - - if (be_cmd_txq_create(adapter, q, cq)) - goto err; } return 0; @@ -1728,8 +1786,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter) static u32 be_num_rxqs_want(struct be_adapter *adapter) { if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !adapter->sriov_enabled && be_physfn(adapter) && - !be_is_mc(adapter)) { + !sriov_enabled(adapter) && be_physfn(adapter) && + !be_is_mc(adapter)) { return 1 + MAX_RSS_QS; /* one default non-RSS queue */ } else { dev_warn(&adapter->pdev->dev, @@ -1929,6 +1987,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); struct be_adapter *adapter = container_of(tx_eq, struct be_adapter, tx_eq); + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; struct be_tx_obj *txo; struct be_eth_tx_compl *txcp; int tx_compl, mcc_compl, status = 0; @@ -1965,12 +2024,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) mcc_compl = be_process_mcc(adapter, &status); if (mcc_compl) { - struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl); } napi_complete(napi); + /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */ + if (lancer_chip(adapter) && !msix_enabled(adapter)) { + for_all_tx_queues(adapter, txo, i) + be_cq_notify(adapter, txo->cq.id, true, 0); + + be_cq_notify(adapter, mcc_obj->cq.id, true, 0); + } + be_eq_notify(adapter, tx_eq->q.id, true, false, 0); adapter->drv_stats.tx_events++; return 1; @@ -1982,6 +2048,9 @@ void be_detect_dump_ue(struct be_adapter *adapter) u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; u32 i; + if (adapter->eeh_err || adapter->ue_detected) + return; + if (lancer_chip(adapter)) { sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { @@ -2008,7 +2077,8 @@ void be_detect_dump_ue(struct be_adapter *adapter) sliport_status & SLIPORT_STATUS_ERR_MASK) { adapter->ue_detected = true; adapter->eeh_err = true; - dev_err(&adapter->pdev->dev, "UE Detected!!\n"); + dev_err(&adapter->pdev->dev, + "Unrecoverable error in the card\n"); } if (ue_lo) { @@ -2036,53 +2106,6 @@ void be_detect_dump_ue(struct be_adapter *adapter) } } -static void be_worker(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, work.work); - struct be_rx_obj *rxo; - int i; - - if (!adapter->ue_detected) - be_detect_dump_ue(adapter); - - /* when interrupts are not yet enabled, just reap any pending - * mcc completions */ - if (!netif_running(adapter->netdev)) { - int mcc_compl, status = 0; - - mcc_compl = be_process_mcc(adapter, &status); - - if (mcc_compl) { - struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); - } - - goto reschedule; - } - - if (!adapter->stats_cmd_sent) { - if (lancer_chip(adapter)) - lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); - else - be_cmd_get_stats(adapter, &adapter->stats_cmd); - } - - for_all_rx_queues(adapter, rxo, i) { - be_rx_eqd_update(adapter, rxo); - - if (rxo->rx_post_starved) { - rxo->rx_post_starved = false; - be_post_rx_frags(rxo, GFP_KERNEL); - } - } - -reschedule: - adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); -} - static void be_msix_disable(struct be_adapter *adapter) { if (msix_enabled(adapter)) 
{ @@ -2119,27 +2142,28 @@ done: static int be_sriov_enable(struct be_adapter *adapter) { be_check_sriov_fn_type(adapter); + #ifdef CONFIG_PCI_IOV if (be_physfn(adapter) && num_vfs) { int status, pos; - u16 nvfs; + u16 dev_vfs; pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); pci_read_config_word(adapter->pdev, - pos + PCI_SRIOV_TOTAL_VF, &nvfs); + pos + PCI_SRIOV_TOTAL_VF, &dev_vfs); - if (num_vfs > nvfs) { + adapter->num_vfs = min_t(u16, num_vfs, dev_vfs); + if (adapter->num_vfs != num_vfs) dev_info(&adapter->pdev->dev, - "Device supports %d VFs and not %d\n", - nvfs, num_vfs); - num_vfs = nvfs; - } + "Device supports %d VFs and not %d\n", + adapter->num_vfs, num_vfs); - status = pci_enable_sriov(adapter->pdev, num_vfs); - adapter->sriov_enabled = status ? false : true; + status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (status) + adapter->num_vfs = 0; - if (adapter->sriov_enabled) { + if (adapter->num_vfs) { adapter->vf_cfg = kcalloc(num_vfs, sizeof(struct be_vf_cfg), GFP_KERNEL); @@ -2154,10 +2178,10 @@ static int be_sriov_enable(struct be_adapter *adapter) static void be_sriov_disable(struct be_adapter *adapter) { #ifdef CONFIG_PCI_IOV - if (adapter->sriov_enabled) { + if (sriov_enabled(adapter)) { pci_disable_sriov(adapter->pdev); kfree(adapter->vf_cfg); - adapter->sriov_enabled = false; + adapter->num_vfs = 0; } #endif } @@ -2351,8 +2375,8 @@ static int be_close(struct net_device *netdev) static int be_rx_queues_setup(struct be_adapter *adapter) { struct be_rx_obj *rxo; - int rc, i; - u8 rsstable[MAX_RSS_QS]; + int rc, i, j; + u8 rsstable[128]; for_all_rx_queues(adapter, rxo, i) { rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, @@ -2364,11 +2388,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter) } if (be_multi_rxq(adapter)) { - for_all_rss_queues(adapter, rxo, i) - rsstable[i] = rxo->rss_id; + for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { + for_all_rss_queues(adapter, rxo, i) { + if ((j + i) >= 128) + break; + rsstable[j + i] = rxo->rss_id; + } + } + rc = be_cmd_rss_config(adapter, rsstable, 128); - rc = be_cmd_rss_config(adapter, rsstable, - adapter->num_rx_qs - 1); if (rc) return rc; } @@ -2386,6 +2414,7 @@ static int be_open(struct net_device *netdev) struct be_adapter *adapter = netdev_priv(netdev); struct be_eq_obj *tx_eq = &adapter->tx_eq; struct be_rx_obj *rxo; + u8 link_status; int status, i; status = be_rx_queues_setup(adapter); @@ -2409,6 +2438,11 @@ static int be_open(struct net_device *netdev) /* Now that interrupts are on we can process async mcc */ be_async_mcc_enable(adapter); + status = be_cmd_link_status_query(adapter, NULL, NULL, + &link_status, 0); + if (!status) + be_link_status_update(adapter, link_status); + return 0; err: be_close(adapter->netdev); @@ -2465,19 +2499,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) u32 vf; int status = 0; u8 mac[ETH_ALEN]; + struct be_vf_cfg *vf_cfg; be_vf_eth_addr_generate(adapter, mac); - for (vf = 0; vf < num_vfs; vf++) { - status = be_cmd_pmac_add(adapter, mac, - adapter->vf_cfg[vf].vf_if_handle, - &adapter->vf_cfg[vf].vf_pmac_id, - vf + 1); + for_all_vfs(adapter, vf_cfg, vf) { + if (lancer_chip(adapter)) { + status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); + } else { + status = be_cmd_pmac_add(adapter, mac, + vf_cfg->if_handle, + &vf_cfg->pmac_id, vf + 1); + } + if (status) dev_err(&adapter->pdev->dev, - "Mac address add failed for VF %d\n", vf); + "Mac address assignment failed for VF %d\n", vf); else - 
memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN); + memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); mac[5] += 1; } @@ -2486,24 +2525,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) static void be_vf_clear(struct be_adapter *adapter) { + struct be_vf_cfg *vf_cfg; u32 vf; - for (vf = 0; vf < num_vfs; vf++) { - if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) - be_cmd_pmac_del(adapter, - adapter->vf_cfg[vf].vf_if_handle, - adapter->vf_cfg[vf].vf_pmac_id, vf + 1); - } + for_all_vfs(adapter, vf_cfg, vf) { + if (lancer_chip(adapter)) + be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); + else + be_cmd_pmac_del(adapter, vf_cfg->if_handle, + vf_cfg->pmac_id, vf + 1); - for (vf = 0; vf < num_vfs; vf++) - if (adapter->vf_cfg[vf].vf_if_handle) - be_cmd_if_destroy(adapter, - adapter->vf_cfg[vf].vf_if_handle, vf + 1); + be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); + } } static int be_clear(struct be_adapter *adapter) { - if (be_physfn(adapter) && adapter->sriov_enabled) + if (sriov_enabled(adapter)) be_vf_clear(adapter); be_cmd_if_destroy(adapter, adapter->if_handle, 0); @@ -2511,61 +2549,94 @@ static int be_clear(struct be_adapter *adapter) be_mcc_queues_destroy(adapter); be_rx_queues_destroy(adapter); be_tx_queues_destroy(adapter); - adapter->eq_next_idx = 0; - - adapter->be3_native = false; - adapter->promiscuous = false; /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); return 0; } +static void be_vf_setup_init(struct be_adapter *adapter) +{ + struct be_vf_cfg *vf_cfg; + int vf; + + for_all_vfs(adapter, vf_cfg, vf) { + vf_cfg->if_handle = -1; + vf_cfg->pmac_id = -1; + } +} + static int be_vf_setup(struct be_adapter *adapter) { + struct be_vf_cfg *vf_cfg; u32 cap_flags, en_flags, vf; u16 lnk_speed; int status; - cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; - for (vf = 0; vf < num_vfs; vf++) { + be_vf_setup_init(adapter); + + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST; + for_all_vfs(adapter, vf_cfg, vf) { status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL, - &adapter->vf_cfg[vf].vf_if_handle, - NULL, vf+1); + &vf_cfg->if_handle, NULL, vf + 1); if (status) goto err; - adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID; } - if (!lancer_chip(adapter)) { - status = be_vf_eth_addr_config(adapter); - if (status) - goto err; - } + status = be_vf_eth_addr_config(adapter); + if (status) + goto err; - for (vf = 0; vf < num_vfs; vf++) { + for_all_vfs(adapter, vf_cfg, vf) { status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, - vf + 1); + NULL, vf + 1); if (status) goto err; - adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10; + vf_cfg->tx_rate = lnk_speed * 10; } return 0; err: return status; } +static void be_setup_init(struct be_adapter *adapter) +{ + adapter->vlan_prio_bmap = 0xff; + adapter->link_speed = -1; + adapter->if_handle = -1; + adapter->be3_native = false; + adapter->promiscuous = false; + adapter->eq_next_idx = 0; +} + +static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac) +{ + u32 pmac_id; + int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id); + if (status != 0) + goto do_none; + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, + false, adapter->if_handle, pmac_id); + if (status != 0) + goto do_none; + status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id, 0); +do_none: + return status; +} + static int be_setup(struct be_adapter *adapter) { struct net_device *netdev = 
adapter->netdev; u32 cap_flags, en_flags; u32 tx_fc, rx_fc; - int status; + int status, i; u8 mac[ETH_ALEN]; + struct be_tx_obj *txo; - /* Allow all priorities by default. A GRP5 evt may modify this */ - adapter->vlan_prio_bmap = 0xff; - adapter->link_speed = -1; + be_setup_init(adapter); be_cmd_req_native_mode(adapter); @@ -2583,7 +2654,7 @@ static int be_setup(struct be_adapter *adapter) memset(mac, 0, ETH_ALEN); status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, - true /*permanent */, 0); + true /*permanent */, 0, 0); if (status) return status; memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); @@ -2592,7 +2663,8 @@ static int be_setup(struct be_adapter *adapter) en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS | - BE_IF_FLAGS_PROMISCUOUS; + BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS; + if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) { cap_flags |= BE_IF_FLAGS_RSS; en_flags |= BE_IF_FLAGS_RSS; @@ -2603,12 +2675,23 @@ static int be_setup(struct be_adapter *adapter) if (status != 0) goto err; - /* For BEx, the VF's permanent mac queried from card is incorrect. - * Query the mac configued by the PF using if_handle - */ - if (!be_physfn(adapter) && !lancer_chip(adapter)) { - status = be_cmd_mac_addr_query(adapter, mac, - MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); + for_all_tx_queues(adapter, txo, i) { + status = be_cmd_txq_create(adapter, &txo->q, &txo->cq); + if (status) + goto err; + } + + /* The VF's permanent mac queried from card is incorrect. + * For BEx: Query the mac configued by the PF using if_handle + * For Lancer: Get and use mac_list to obtain mac address. + */ + if (!be_physfn(adapter)) { + if (lancer_chip(adapter)) + status = be_configure_mac_from_list(adapter, mac); + else + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, false, + adapter->if_handle, 0); if (!status) { memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); @@ -2624,18 +2707,21 @@ static int be_setup(struct be_adapter *adapter) be_set_rx_mode(adapter->netdev); status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); - if (status) + /* For Lancer: It is legal for this cmd to fail on VF */ + if (status && (be_physfn(adapter) || !lancer_chip(adapter))) goto err; + if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) { status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - if (status) + /* For Lancer: It is legal for this cmd to fail on VF */ + if (status && (be_physfn(adapter) || !lancer_chip(adapter))) goto err; } pcie_set_readrq(adapter->pdev, 4096); - if (be_physfn(adapter) && adapter->sriov_enabled) { + if (sriov_enabled(adapter)) { status = be_vf_setup(adapter); if (status) goto err; @@ -2647,6 +2733,19 @@ err: return status; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void be_netpoll(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_rx_obj *rxo; + int i; + + event_handle(adapter, &adapter->tx_eq, false); + for_all_rx_queues(adapter, rxo, i) + event_handle(adapter, &rxo->rx_eq, true); +} +#endif + #define FW_FILE_HDR_SIGN "ServerEngines Corp. 
" static bool be_flash_redboot(struct be_adapter *adapter, const u8 *p, u32 img_start, int image_size, @@ -2995,7 +3094,10 @@ static struct net_device_ops be_netdev_ops = { .ndo_set_vf_mac = be_set_vf_mac, .ndo_set_vf_vlan = be_set_vf_vlan, .ndo_set_vf_tx_rate = be_set_vf_tx_rate, - .ndo_get_vf_config = be_get_vf_config + .ndo_get_vf_config = be_get_vf_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = be_netpoll, +#endif }; static void be_netdev_init(struct net_device *netdev) @@ -3242,6 +3344,7 @@ static int be_dev_family_check(struct be_adapter *adapter) break; case BE_DEVICE_ID2: case OC_DEVICE_ID2: + case OC_DEVICE_ID5: adapter->generation = BE_GEN3; break; case OC_DEVICE_ID3: @@ -3267,7 +3370,7 @@ static int be_dev_family_check(struct be_adapter *adapter) static int lancer_wait_ready(struct be_adapter *adapter) { -#define SLIPORT_READY_TIMEOUT 500 +#define SLIPORT_READY_TIMEOUT 30 u32 sliport_status; int status = 0, i; @@ -3276,7 +3379,7 @@ static int lancer_wait_ready(struct be_adapter *adapter) if (sliport_status & SLIPORT_STATUS_RDY_MASK) break; - msleep(20); + msleep(1000); } if (i == SLIPORT_READY_TIMEOUT) @@ -3313,6 +3416,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter) return status; } +static void lancer_test_and_recover_fn_err(struct be_adapter *adapter) +{ + int status; + u32 sliport_status; + + if (adapter->eeh_err || adapter->ue_detected) + return; + + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + + if (sliport_status & SLIPORT_STATUS_ERR_MASK) { + dev_err(&adapter->pdev->dev, + "Adapter in error state." + "Trying to recover.\n"); + + status = lancer_test_and_set_rdy_state(adapter); + if (status) + goto err; + + netif_device_detach(adapter->netdev); + + if (netif_running(adapter->netdev)) + be_close(adapter->netdev); + + be_clear(adapter); + + adapter->fw_timeout = false; + + status = be_setup(adapter); + if (status) + goto err; + + if (netif_running(adapter->netdev)) { + status = be_open(adapter->netdev); + if (status) + goto err; + } + + netif_device_attach(adapter->netdev); + + dev_err(&adapter->pdev->dev, + "Adapter error recovery succeeded\n"); + } + return; +err: + dev_err(&adapter->pdev->dev, + "Adapter error recovery failed\n"); +} + +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + struct be_rx_obj *rxo; + int i; + + if (lancer_chip(adapter)) + lancer_test_and_recover_fn_err(adapter); + + be_detect_dump_ue(adapter); + + /* when interrupts are not yet enabled, just reap any pending + * mcc completions */ + if (!netif_running(adapter->netdev)) { + int mcc_compl, status = 0; + + mcc_compl = be_process_mcc(adapter, &status); + + if (mcc_compl) { + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); + } + + goto reschedule; + } + + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } + + for_all_rx_queues(adapter, rxo, i) { + be_rx_eqd_update(adapter, rxo); + + if (rxo->rx_post_starved) { + rxo->rx_post_starved = false; + be_post_rx_frags(rxo, GFP_KERNEL); + } + } + +reschedule: + adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + static int __devinit be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { @@ -3365,7 +3566,12 @@ static int __devinit be_probe(struct pci_dev *pdev, 
goto disable_sriov; if (lancer_chip(adapter)) { - status = lancer_test_and_set_rdy_state(adapter); + status = lancer_wait_ready(adapter); + if (!status) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); + status = lancer_test_and_set_rdy_state(adapter); + } if (status) { dev_err(&pdev->dev, "Adapter in non recoverable error\n"); goto ctrl_clean; @@ -3559,6 +3765,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) dev_info(&adapter->pdev->dev, "EEH reset\n"); adapter->eeh_err = false; + adapter->ue_detected = false; + adapter->fw_timeout = false; status = pci_enable_device(pdev); if (status) diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 251b635fe75..60f0e788cc2 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = { }, }; -static int __init ethoc_init(void) -{ - return platform_driver_register(&ethoc_driver); -} - -static void __exit ethoc_exit(void) -{ - platform_driver_unregister(&ethoc_driver); -} - -module_init(ethoc_init); -module_exit(ethoc_exit); +module_platform_driver(ethoc_driver); MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 61d2bddec1f..c82d444b582 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i { struct netdev_private *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(np->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 9de37642f09..3574e1499df 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -21,7 +21,7 @@ config NET_VENDOR_FREESCALE if NET_VENDOR_FREESCALE config FEC - bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" + tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ ARCH_MXC || SOC_IMX28) default ARCH_MXC || SOC_IMX28 if ARM diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index c136230d50b..20c2e3f3e18 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = { MODULE_DEVICE_TABLE(platform, fec_devtype); enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ + IMX25_FEC = 1, /* runs on i.mx25/50/53 */ IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, @@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #elif defined (CONFIG_M5272C3) #define FEC_FLASHMAC (0xffe04000 + 4) #elif defined(CONFIG_MOD5272) -#define FEC_FLASHMAC 0xffc0406b +#define FEC_FLASHMAC 0xffc0406b #else #define FEC_FLASHMAC 0 #endif @@ -255,11 +255,13 @@ struct fec_enet_private { #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) -#define FEC_MII_TIMEOUT 1000 /* us */ +#define FEC_MII_TIMEOUT 30000 /* us */ /* 
Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) +static int mii_cnt; + static void *swap_buffer(void *bufaddr, int len) { int i; @@ -516,6 +518,7 @@ fec_stop(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); /* We cannot expect a graceful transmit stop without link !!! */ if (fep->link) { @@ -532,8 +535,10 @@ fec_stop(struct net_device *ndev) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { writel(2, fep->hwp + FEC_ECNTRL); + writel(rmii_mode, fep->hwp + FEC_R_CNTRL); + } } @@ -819,7 +824,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev) iap = (unsigned char *)FEC_FLASHMAC; #else if (pdata) - memcpy(iap, pdata->mac, ETH_ALEN); + iap = (unsigned char *)&pdata->mac; #endif } @@ -866,6 +871,8 @@ static void fec_enet_adjust_link(struct net_device *ndev) if (phy_dev->link) { if (fep->full_duplex != phy_dev->duplex) { fec_restart(ndev, phy_dev->duplex); + /* prevent unnecessary second fec_restart() below */ + fep->link = phy_dev->link; status_change = 1; } } @@ -973,8 +980,9 @@ static int fec_enet_mii_probe(struct net_device *ndev) } if (phy_id >= PHY_MAX_ADDR) { - printk(KERN_INFO "%s: no PHY, assuming direct connection " - "to switch\n", ndev->name); + printk(KERN_INFO + "%s: no PHY, assuming direct connection to switch\n", + ndev->name); strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); phy_id = 0; } @@ -999,8 +1007,9 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, + printk(KERN_INFO + "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + ndev->name, fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), fep->phy_dev->irq); @@ -1034,8 +1043,12 @@ static int fec_enet_mii_init(struct platform_device *pdev) */ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ - fep->mii_bus = fec0_mii_bus; - return 0; + if (mii_cnt && fec0_mii_bus) { + fep->mii_bus = fec0_mii_bus; + mii_cnt++; + return 0; + } + return -ENOENT; } fep->mii_timeout = 0; @@ -1080,6 +1093,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; + mii_cnt++; + /* save fec0 mii_bus */ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; @@ -1096,11 +1111,11 @@ err_out: static void fec_enet_mii_remove(struct fec_enet_private *fep) { - if (fep->phy_dev) - phy_disconnect(fep->phy_dev); - mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); - mdiobus_free(fep->mii_bus); + if (--mii_cnt == 0) { + mdiobus_unregister(fep->mii_bus); + kfree(fep->mii_bus->irq); + mdiobus_free(fep->mii_bus); + } } static int fec_enet_get_settings(struct net_device *ndev, @@ -1137,7 +1152,7 @@ static void fec_enet_get_drvinfo(struct net_device *ndev, strcpy(info->bus_info, dev_name(&ndev->dev)); } -static struct ethtool_ops fec_enet_ethtool_ops = { +static const struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, @@ -1574,8 +1589,12 @@ fec_probe(struct platform_device *pdev) for (i = 0; i < 
FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); - if (i && irq < 0) - break; + if (irq < 0) { + if (i) + break; + ret = irq; + goto failed_irq; + } ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); if (ret) { while (--i >= 0) { @@ -1586,7 +1605,7 @@ fec_probe(struct platform_device *pdev) } } - fep->clk = clk_get(&pdev->dev, "fec_clk"); + fep->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(fep->clk)) { ret = PTR_ERR(fep->clk); goto failed_clk; @@ -1638,13 +1657,18 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct resource *r; + int i; - fec_stop(ndev); + unregister_netdev(ndev); fec_enet_mii_remove(fep); + for (i = 0; i < FEC_IRQ_NUM; i++) { + int irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } clk_disable(fep->clk); clk_put(fep->clk); iounmap(fep->hwp); - unregister_netdev(ndev); free_netdev(ndev); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 5bf5471f06f..910a8e18a9a 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = { .remove = fs_enet_remove, }; -static int __init fs_init(void) -{ - return platform_driver_register(&fs_enet_driver); -} - -static void __exit fs_cleanup(void) -{ - platform_driver_unregister(&fs_enet_driver); -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void fs_enet_netpoll(struct net_device *dev) { @@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev) } #endif -/**************************************************************************************/ - -module_init(fs_init); -module_exit(fs_cleanup); +module_platform_driver(fs_enet_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index b09270b5d0a..0f2d1a71090 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = { .remove = fs_enet_mdio_remove, }; -static int fs_enet_mdio_bb_init(void) -{ - return platform_driver_register(&fs_enet_bb_mdio_driver); -} - -static void fs_enet_mdio_bb_exit(void) -{ - platform_driver_unregister(&fs_enet_bb_mdio_driver); -} - -module_init(fs_enet_mdio_bb_init); -module_exit(fs_enet_mdio_bb_exit); +module_platform_driver(fs_enet_bb_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index e0e9d6c35d8..55bb867258e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = { .remove = fs_enet_mdio_remove, }; -static int fs_enet_mdio_fec_init(void) -{ - return platform_driver_register(&fs_enet_fec_mdio_driver); -} - -static void fs_enet_mdio_fec_exit(void) -{ - platform_driver_unregister(&fs_enet_fec_mdio_driver); -} - -module_init(fs_enet_mdio_fec_init); -module_exit(fs_enet_mdio_fec_exit); +module_platform_driver(fs_enet_fec_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 4d9f84b8ab9..9eb815941df 100644 --- 
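
The module_platform_driver() conversions in the hunks above (ethoc, fs_enet, and the two fs_enet MDIO drivers) and in the Freescale hunks that follow all drop the same hand-written registration boilerplate. As a rough sketch of what the macro generates for the ethoc case (the generated names follow the usual helper naming and are shown from memory, not copied from platform_device.h):

/* rough expansion of module_platform_driver(ethoc_driver) */
static int __init ethoc_driver_init(void)
{
	return platform_driver_register(&ethoc_driver);
}
module_init(ethoc_driver_init);

static void __exit ethoc_driver_exit(void)
{
	platform_driver_unregister(&ethoc_driver);
}
module_exit(ethoc_driver_exit);

The behaviour is unchanged; the macro only removes the per-driver init/exit stubs.
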
a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -360,12 +360,11 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev) if (tbiaddr == -1) { err = -EBUSY; - goto err_free_irqs; + } else { + out_be32(tbipa, tbiaddr); } - out_be32(tbipa, tbiaddr); - err = of_mdiobus_register(new_bus, np); if (err) { printk (KERN_ERR "%s: Cannot register as MDIO bus\n", @@ -443,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = { .remove = fsl_pq_mdio_remove, }; -int __init fsl_pq_mdio_init(void) -{ - return platform_driver_register(&fsl_pq_mdio_driver); -} -module_init(fsl_pq_mdio_init); +module_platform_driver(fsl_pq_mdio_driver); -void fsl_pq_mdio_exit(void) -{ - platform_driver_unregister(&fsl_pq_mdio_driver); -} -module_exit(fsl_pq_mdio_exit); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 83199fd0d62..e01cdaa722a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) mac_addr = of_get_mac_address(np); if (mac_addr) - memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); if (model && !strcasecmp(model, "TSEC")) priv->device_flags = @@ -2306,7 +2306,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv) } /* Enables and disables VLAN insertion/extraction */ -void gfar_vlan_mode(struct net_device *dev, u32 features) +void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = NULL; @@ -3114,7 +3114,7 @@ static void gfar_set_multi(struct net_device *dev) static void gfar_clear_exact_match(struct net_device *dev) { int idx; - static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; + static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) gfar_set_mac_for_addr(dev, idx, zero_arr); @@ -3137,7 +3137,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) { u32 tempval; struct gfar_private *priv = netdev_priv(dev); - u32 result = ether_crc(MAC_ADDR_LEN, addr); + u32 result = ether_crc(ETH_ALEN, addr); int width = priv->hash_width; u8 whichbit = (result >> (32 - width)) & 0x1f; u8 whichreg = result >> (32 - width + 5); @@ -3158,7 +3158,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; int idx; - char tmpbuf[MAC_ADDR_LEN]; + char tmpbuf[ETH_ALEN]; u32 tempval; u32 __iomem *macptr = &regs->macstnaddr1; @@ -3166,8 +3166,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, /* Now copy it into the mac registers backwards, cuz */ /* little endian is silly */ - for (idx = 0; idx < MAC_ADDR_LEN; idx++) - tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; + for (idx = 0; idx < ETH_ALEN; idx++) + tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; gfar_write(macptr, *((u32 *) (tmpbuf))); @@ -3281,16 +3281,4 @@ static struct platform_driver gfar_driver = { .remove = gfar_remove, }; -static int __init gfar_init(void) -{ - return platform_driver_register(&gfar_driver); -} - -static void __exit gfar_exit(void) -{ - platform_driver_unregister(&gfar_driver); -} - -module_init(gfar_init); -module_exit(gfar_exit); - +module_platform_driver(gfar_driver); diff --git a/drivers/net/ethernet/freescale/gianfar.h
b/drivers/net/ethernet/freescale/gianfar.h index 9aa43773e8e..fe7ac3a8319 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -74,9 +74,6 @@ struct ethtool_rx_list { * will be the next highest multiple of 512 bytes. */ #define INCREMENTAL_BUFFER_SIZE 512 - -#define MAC_ADDR_LEN 6 - #define PHY_INIT_TIMEOUT 100000 #define GFAR_PHY_CHANGE_TIME 2 @@ -1179,9 +1176,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, extern void gfar_configure_coalescing(struct gfar_private *priv, unsigned long tx_mask, unsigned long rx_mask); void gfar_init_sysfs(struct net_device *dev); -int gfar_set_features(struct net_device *dev, u32 features); +int gfar_set_features(struct net_device *dev, netdev_features_t features); extern void gfar_check_rx_parser_mode(struct gfar_private *priv); -extern void gfar_vlan_mode(struct net_device *dev, u32 features); +extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); extern const struct ethtool_ops gfar_ethtool_ops; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 212736bab6b..5a3b2e5b288 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva return err; } -int gfar_set_features(struct net_device *dev, u32 features) +int gfar_set_features(struct net_device *dev, netdev_features_t features) { struct gfar_private *priv = netdev_priv(dev); unsigned long flags; int err = 0, i = 0; - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) gfar_vlan_mode(dev, features); @@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab) /* We need a copy of the filer table because * we want to change its order */ - temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL); + temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); if (temp_table == NULL) return -ENOMEM; - memcpy(temp_table, tab, sizeof(*temp_table)); mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, sizeof(struct gfar_mask_entry), GFP_KERNEL); @@ -1693,8 +1692,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) ret = gfar_set_hash_opts(priv, cmd); break; case ETHTOOL_SRXCLSRLINS: - if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && - cmd->fs.ring_cookie >= priv->num_rx_queues) { + if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && + cmd->fs.ring_cookie >= priv->num_rx_queues) || + cmd->fs.location >= MAX_FILER_IDX) { ret = -EINVAL; break; } diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index f67b8aebc89..83e0ed757e3 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = { .remove = gianfar_ptp_remove, }; -/* module operations */ - -static int __init ptp_gianfar_init(void) -{ - return platform_driver_register(&gianfar_ptp_driver); -} - -module_init(ptp_gianfar_init); - -static void __exit ptp_gianfar_exit(void) -{ - platform_driver_unregister(&gianfar_ptp_driver); -} - -module_exit(ptp_gianfar_exit); +module_platform_driver(gianfar_ptp_driver); MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); MODULE_DESCRIPTION("PTP clock using the eTSEC"); diff --git 
a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index b5dc0273a1d..ba2dc083bfc 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, static inline int compare_addr(u8 **addr1, u8 **addr2) { - return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); + return memcmp(addr1, addr2, ETH_ALEN); } #ifdef DEBUG diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index d12fcad145e..2e395a2566b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include <linux/list.h> +#include <linux/if_ether.h> #include <asm/immap_qe.h> #include <asm/qe.h> @@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics { #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) -#define ENET_NUM_OCTETS_PER_ADDRESS 6 #define ENET_GROUP_ADDR 0x01 /* Group address mask for ethernet addresses */ @@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses { /* UCC GETH 82xx Ethernet Address Container */ struct enet_addr_container { - u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ + u8 address[ETH_ALEN]; /* ethernet address */ enum ucc_geth_enet_address_recognition_location location; /* location in 82xx address recognition @@ -1194,7 +1194,7 @@ struct ucc_geth_private { u16 cpucount[NUM_TX_QUEUES]; u16 __iomem *p_cpucount[NUM_TX_QUEUES]; int indAddrRegUsed[NUM_OF_PADDRS]; - u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ + u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */ u8 numGroupAddrInHash; u8 numIndAddrInHash; u8 numIndAddrInReg; diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 15416752c13..ee84b472cee 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), + "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c index 067c46069a1..114cda7721f 100644 --- a/drivers/net/ethernet/i825xx/eepro.c +++ b/drivers/net/ethernet/i825xx/eepro.c @@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev, static void eepro_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strcpy(drvinfo->driver, DRV_NAME); - strcpy(drvinfo->version, DRV_VERSION); - sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), + "ISA 0x%lx", dev->base_addr); } static const struct ethtool_ops eepro_ethtool_ops = { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index bfeccbfde23..3554414eb5e 100644 --- 
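
The get_drvinfo conversions above (fealnx earlier, fmvj18x_cs and eepro here) all move to bounded copies into the fixed-size ethtool_drvinfo fields. A minimal sketch of the resulting shape for a hypothetical foo driver, where DRV_NAME and DRV_VERSION stand in for the driver's own strings:

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	/* strlcpy/snprintf cannot overrun the fixed-size destination fields */
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "ISA 0x%lx", dev->base_addr);
}
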
a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2114,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; struct hcp_ehea_port_cb1 *cb1; int index; u64 hret; + int err = 0; cb1 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb1) { pr_err("no mem for cb1\n"); + err = -ENOMEM; goto out; } @@ -2132,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) H_PORT_CB1, H_PORT_CB1_ALL, cb1); if (hret != H_SUCCESS) { pr_err("query_ehea_port failed\n"); + err = -EINVAL; goto out; } @@ -2140,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, H_PORT_CB1, H_PORT_CB1_ALL, cb1); - if (hret != H_SUCCESS) + if (hret != H_SUCCESS) { pr_err("modify_ehea_port failed\n"); + err = -EINVAL; + } out: free_page((unsigned long)cb1); - return; + return err; } -static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; struct hcp_ehea_port_cb1 *cb1; int index; u64 hret; + int err = 0; cb1 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb1) { pr_err("no mem for cb1\n"); + err = -ENOMEM; goto out; } @@ -2165,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) H_PORT_CB1, H_PORT_CB1_ALL, cb1); if (hret != H_SUCCESS) { pr_err("query_ehea_port failed\n"); + err = -EINVAL; goto out; } @@ -2173,10 +2181,13 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, H_PORT_CB1, H_PORT_CB1_ALL, cb1); - if (hret != H_SUCCESS) + if (hret != H_SUCCESS) { pr_err("modify_ehea_port failed\n"); + err = -EINVAL; + } out: free_page((unsigned long)cb1); + return err; } int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index ed79b2d3ad3..2abce965c7b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev) if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) zmii_detach(dev->zmii_dev, dev->zmii_port); + busy_phy_map &= ~(1 << dev->phy.address); + DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map); + mal_unregister_commac(dev->mal, &dev->commac); emac_put_deps(dev); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index b1cd41b9c61..e877371680a 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev, sizeof(info->version) - 1); } -static u32 ibmveth_fix_features(struct net_device *dev, u32 features) +static netdev_features_t ibmveth_fix_features(struct net_device *dev, + netdev_features_t features) { /* * Since the ibmveth firmware interface does not have the @@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) return rc1 ? 
rc1 : rc2; } -static int ibmveth_set_features(struct net_device *dev, u32 features) +static int ibmveth_set_features(struct net_device *dev, + netdev_features_t features) { struct ibmveth_adapter *adapter = netdev_priv(dev); int rx_csum = !!(features & NETIF_F_RXCSUM); diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index 8fd80a00b89..075451d0207 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) } /* The last cycle is a tri-state, so read from the PHY. */ - for (j = 7; j < 8; j++) { - for (i = 0; i < p[j].len; i++) { - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); - - p[j].field |= ((ipg_r8(PHY_CTRL) & - IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); - - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); - } - } + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); + ipg_r8(PHY_CTRL); + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); } static void ipg_set_led_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 5a2fdf7a00c..9436397e572 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct nic *nic = netdev_priv(netdev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, pci_name(nic->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); } #define E100_PHY_REGS 0x1C diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 2b223ac99c4..3103f0b6bf5 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct e1000_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - strncpy(drvinfo->driver, e1000_driver_name, 32); - strncpy(drvinfo->version, e1000_driver_version, 32); + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000_driver_version, + sizeof(drvinfo->version)); - sprintf(firmware_version, "N/A"); - strncpy(drvinfo->fw_version, firmware_version, 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index 5c9a8403668..f6c4d7e2560 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); #define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E #define NODE_ADDRESS_SIZE 6 -#define ETH_LENGTH_OF_ADDRESS 6 /* MAC decode size is 128K - This is the size of BAR0 */ #define MAC_DECODE_SIZE (128 * 1024) @@ -813,8 +812,7 @@ struct e1000_ffvt_entry { #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI 
Control - RW */ -extern void __iomem *ce4100_gbe_mdio_base_virt; -#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) #define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) #define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) #define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) @@ -1344,6 +1342,7 @@ struct e1000_hw_stats { struct e1000_hw { u8 __iomem *hw_addr; u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; e1000_mac_type mac_type; e1000_phy_type phy_type; u32 phy_init_script; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index cf480b55462..669ca3800c0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -33,11 +33,6 @@ #include <linux/bitops.h> #include <linux/if_vlan.h> -/* Intel Media SOC GbE MDIO physical base address */ -static unsigned long ce4100_gbe_mdio_base_phy; -/* Intel Media SOC GbE MDIO virtual base address */ -void __iomem *ce4100_gbe_mdio_base_virt; - char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; #define DRV_VERSION "7.3.21-k8-NAPI" @@ -167,9 +162,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb); static bool e1000_vlan_used(struct e1000_adapter *adapter); -static void e1000_vlan_mode(struct net_device *netdev, u32 features); -static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); #ifdef CONFIG_PM @@ -806,7 +802,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev) } } -static u32 e1000_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -820,10 +817,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features) return features; } -static int e1000_set_features(struct net_device *netdev, u32 features) +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) { struct e1000_adapter *adapter = netdev_priv(netdev); - u32 changed = features ^ netdev->features; + netdev_features_t changed = features ^ netdev->features; if (changed & NETIF_F_HW_VLAN_RX) e1000_vlan_mode(netdev, features); @@ -1051,11 +1049,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, err = -EIO; if (hw->mac_type == e1000_ce4100) { - ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); - ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), pci_resource_len(pdev, BAR_1)); - if (!ce4100_gbe_mdio_base_virt) + if (!hw->ce4100_gbe_mdio_base_virt) goto err_mdio_ioremap; } @@ -1182,7 +1180,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (global_quad_port_a != 0) adapter->eeprom_wol = 0; else - adapter->quad_port_a = 1; + adapter->quad_port_a = true; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; @@ -1246,7 +1244,7 @@ 
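
The u32-to-netdev_features_t changes in the ibmveth and e1000 hunks above track the wider feature-mask type; every ndo callback that takes or returns a feature mask is converted the same way. A sketch of the adjusted signatures for a hypothetical foo driver (foo_vlan_mode is an illustrative helper, and the RXCSUM/VLAN dependency is only an example constraint):

static netdev_features_t foo_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* example constraint: RX VLAN acceleration requires RX checksumming */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_HW_VLAN_RX;
	return features;
}

static int foo_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		foo_vlan_mode(netdev, features);	/* illustrative helper */
	return 0;
}
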
err_eeprom: err_dma: err_sw_init: err_mdio_ioremap: - iounmap(ce4100_gbe_mdio_base_virt); + iounmap(hw->ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -1283,6 +1281,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); @@ -1676,7 +1676,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * need this to apply a workaround later in the send path. */ if (hw->mac_type == e1000_82544 && hw->bus_type == e1000_bus_type_pcix) - adapter->pcix_82544 = 1; + adapter->pcix_82544 = true; ew32(TCTL, tctl); @@ -1999,7 +1999,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - tx_ring->last_tx_tso = 0; + tx_ring->last_tx_tso = false; writel(0, hw->hw_addr + tx_ring->tdh); writel(0, hw->hw_addr + tx_ring->tdt); @@ -2848,7 +2848,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, * DMA'd to the controller */ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) { - tx_ring->last_tx_tso = 0; + tx_ring->last_tx_tso = false; size -= 4; } @@ -3216,7 +3216,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (likely(tso)) { if (likely(hw->mac_type != e1000_82544)) - tx_ring->last_tx_tso = 1; + tx_ring->last_tx_tso = true; tx_flags |= E1000_TX_FLAGS_TSO; } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) tx_flags |= E1000_TX_FLAGS_CSUM; @@ -4577,7 +4577,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, e1000_irq_enable(adapter); } -static void e1000_vlan_mode(struct net_device *netdev, u32 features) +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4600,7 +4601,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features) e1000_irq_enable(adapter); } -static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4609,7 +4610,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) if ((hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && (vid == adapter->mng_vlan_id)) - return; + return 0; if (!e1000_vlan_used(adapter)) e1000_vlan_filter_on_off(adapter, true); @@ -4621,9 +4622,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) e1000_write_vfta(hw, index, vfta); set_bit(vid, adapter->active_vlans); + + return 0; } -static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4644,6 +4647,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (!e1000_vlan_used(adapter)) e1000_vlan_filter_on_off(adapter, false); + + return 0; } static void e1000_restore_vlan(struct e1000_adapter *adapter) @@ -4716,8 +4721,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); - mutex_lock(&adapter->mutex); - if (netif_running(netdev)) { WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); @@ -4725,10 +4728,8 @@ 
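
The e1000_vlan_rx_add_vid()/e1000_vlan_rx_kill_vid() changes above, like the ehea conversion earlier, switch ndo_vlan_rx_add_vid and ndo_vlan_rx_kill_vid from void to an int return so errors can propagate. A sketch of the new contract for a hypothetical foo driver; foo_hw_vlan_filter_add is made up for illustration:

static int foo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct foo_adapter *adapter = netdev_priv(netdev);
	int err;

	err = foo_hw_vlan_filter_add(adapter, vid);	/* hypothetical helper */
	if (err)
		return err;		/* negative errno now reaches the caller */

	set_bit(vid, adapter->active_vlans);
	return 0;
}
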
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) #ifdef CONFIG_PM retval = pci_save_state(pdev); - if (retval) { - mutex_unlock(&adapter->mutex); + if (retval) return retval; - } #endif status = er32(STATUS); @@ -4783,8 +4784,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) if (netif_running(netdev)) e1000_free_irq(adapter); - mutex_unlock(&adapter->mutex); - pci_disable_device(pdev); return 0; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 9fe18d1d53d..f478a22ed57 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -309,6 +309,7 @@ struct e1000_adapter { u32 txd_cmd; bool detect_tx_hung; + bool tx_hang_recheck; u8 tx_timeout_factor; u32 tx_int_delay; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 69c9d219914..fb2c28e799a 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct e1000_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - strncpy(drvinfo->driver, e1000e_driver_name, - sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, e1000e_driver_version, - sizeof(drvinfo->version) - 1); + strlcpy(drvinfo->driver, e1000e_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version)); /* * EEPROM image version # is reported as firmware version # for * PCI-E controllers */ - snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d-%d", (adapter->eeprom_vers & 0xF000) >> 12, (adapter->eeprom_vers & 0x0FF0) >> 4, (adapter->eeprom_vers & 0x000F)); - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a855db1ad24..3911401ed65 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) regs[n] = __er32(hw, E1000_TARC(n)); break; default: - printk(KERN_INFO "%-15s %08x\n", - reginfo->name, __er32(hw, reginfo->ofs)); + pr_info("%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); - printk(KERN_INFO "%-15s ", rname); - for (n = 0; n < 2; n++) - printk(KERN_CONT "%08x ", regs[n]); - printk(KERN_CONT "\n"); + pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } /* @@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - printk(KERN_INFO "Device Name state " - "trans_start last_rx\n"); - printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, netdev->state, netdev->trans_start, - netdev->last_rx); + pr_info("Device Name state trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", + netdev->name, 
netdev->state, netdev->trans_start, + netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); - printk(KERN_INFO " Register Name Value\n"); + pr_info(" Register Name Value\n"); for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; reginfo->name; reginfo++) { e1000_regdump(hw, reginfo); @@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" - " leng ntw timestamp\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; - printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", - 0, tx_ring->next_to_use, tx_ring->next_to_clean, - (unsigned long long)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp); + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); /* Print Tx Ring */ if (!netif_msg_tx_done(adapter)) @@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter) * +----------------------------------------------------------------+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 */ - printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Legacy format\n"); - printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Context format\n"); - printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Data format\n"); + pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; - printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " - "%04X %3X %016llX %p", - (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->length, buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp, - buffer_info->skb); if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC/U\n"); + next_desc = " NTC/U"; else if (i == tx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); + next_desc = " NTU"; else if (i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); + next_desc = " NTC"; else - printk(KERN_CONT "\n"); + next_desc = ""; + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), + i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, @@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print Rx Ring Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC]\n"); - printk(KERN_INFO " %5d %5X %5X\n", 0, - rx_ring->next_to_use, rx_ring->next_to_clean); + pr_info("Queue [NTU] [NTC]\n"); + pr_info(" %5d %5X %5X\n", + 0, rx_ring->next_to_use, rx_ring->next_to_clean); /* Print Rx Ring */ if (!netif_msg_rx_status(adapter)) @@ -337,10 +327,7 @@ rx_ring_summary: * 24 | Buffer Address 3 [63:0] | * +-----------------------------------------------------+ */ - printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " - "[buffer 1 63:0 ] " - "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " - "[bi->skb] <-- Ext Pkt Split format\n"); + pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); /* [Extended] Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 13 12 8 7 4 3 0 @@ -352,35 +339,40 @@ rx_ring_summary: * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ - printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " - "[vl l0 ee es] " - "[ l3 l2 l1 hs] [reserved ] ---------------- " - "[bi->skb] <-- Ext Rx Write-Back format\n"); + pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; buffer_info = &rx_ring->buffer_info[i]; rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); u1 = (struct my_u1 *)rx_desc_ps; staterr = le32_to_cpu(rx_desc_ps->wb.middle.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX %016llX %016llX " - "---------------- %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - buffer_info->skb); + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb, next_desc); } else { - printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %016llX %016llX %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", @@ -388,13 +380,6 @@ 
rx_ring_summary: phys_to_virt(buffer_info->dma), adapter->rx_ps_bsize0, true); } - - if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); } break; default: @@ -407,9 +392,7 @@ rx_ring_summary: * 8 | Reserved | * +-----------------------------------------------------+ */ - printk(KERN_INFO "R [desc] [buf addr 63:0 ] " - "[reserved 63:0 ] [bi->dma ] " - "[bi->skb] <-- Ext (Read) format\n"); + pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); /* Extended Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 24 23 4 3 0 @@ -423,29 +406,37 @@ rx_ring_summary: * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ - printk(KERN_INFO "RWB[desc] [cs ipid mrq] " - "[vt ln xe xs] " - "[bi->skb] <-- Ext (Write-Back) format\n"); + pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); u1 = (struct my_u1 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX ---------------- %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - buffer_info->skb); + pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb, next_desc); } else { - printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", @@ -456,13 +447,6 @@ rx_ring_summary: adapter->rx_buffer_len, true); } - - if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); } } @@ -875,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, u32 length, staterr; unsigned int i; int cleaned_count = 0; - bool cleaned = 0; + bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; @@ -904,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, next_buffer = &rx_ring->buffer_info[i]; - cleaned = 1; + cleaned = true; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, @@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, print_hang_task); + struct net_device *netdev = adapter->netdev; struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; unsigned int eop = tx_ring->buffer_info[i].next_to_watch; @@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) 
return; + if (!adapter->tx_hang_recheck && + (adapter->flags2 & FLAG2_DMA_BURST)) { + /* May be block on write-back, flush and detect again + * flush pending descriptor writebacks to memory + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + adapter->tx_hang_recheck = true; + return; + } + /* Real hang detected */ + adapter->tx_hang_recheck = false; + netif_stop_queue(netdev); + e1e_rphy(hw, PHY_STATUS, &phy_status); e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); @@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; @@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) if (cleaned) { total_tx_packets += buffer_info->segs; total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } } e1000_put_txbuf(adapter, buffer_info); @@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) tx_ring->next_to_clean = i; + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + #define TX_WAKE_THRESHOLD 32 if (count && netif_carrier_ok(netdev) && e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { @@ -1150,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) * Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ - adapter->detect_tx_hung = 0; + adapter->detect_tx_hung = false; if (tx_ring->buffer_info[i].time_stamp && time_after(jiffies, tx_ring->buffer_info[i].time_stamp + (adapter->tx_timeout_factor * HZ)) && - !(er32(STATUS) & E1000_STATUS_TXOFF)) { + !(er32(STATUS) & E1000_STATUS_TXOFF)) schedule_work(&adapter->print_hang_task); - netif_stop_queue(netdev); - } + else + adapter->tx_hang_recheck = false; } adapter->total_tx_bytes += total_tx_bytes; adapter->total_tx_packets += total_tx_packets; @@ -1185,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, unsigned int i, j; u32 length, staterr; int cleaned_count = 0; - bool cleaned = 0; + bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; @@ -1211,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, next_buffer = &rx_ring->buffer_info[i]; - cleaned = 1; + cleaned = true; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); @@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, adapter->flags2 |= FLAG2_IS_DISCARDING; if (adapter->flags2 & FLAG2_IS_DISCARDING) { - e_dbg("Packet Split buffers didn't pick up the full " - "packet\n"); + e_dbg("Packet Split buffers didn't pick up the full packet\n"); dev_kfree_skb_irq(skb); if (staterr & E1000_RXD_STAT_EOP) adapter->flags2 &= ~FLAG2_IS_DISCARDING; @@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->wb.middle.length0); if (!length) { - e_dbg("Last part of the packet spanning multiple " - "descriptors\n"); + e_dbg("Last part of the packet spanning multiple descriptors\n"); dev_kfree_skb_irq(skb); goto next_desc; } @@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter 
*adapter) return; } /* MSI-X failed, so fall through and try MSI */ - e_err("Failed to initialize MSI-X interrupts. " - "Falling back to MSI interrupts.\n"); + e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); e1000e_reset_interrupt_capability(adapter); } adapter->int_mode = E1000E_INT_MODE_MSI; @@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) adapter->flags |= FLAG_MSI_ENABLED; } else { adapter->int_mode = E1000E_INT_MODE_LEGACY; - e_err("Failed to initialize MSI interrupts. Falling " - "back to legacy interrupts.\n"); + e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); } /* Fall through */ case E1000E_INT_MODE_LEGACY: @@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter) e1000_put_txbuf(adapter, buffer_info); } + netdev_reset_queue(adapter->netdev); size = sizeof(struct e1000_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); @@ -2518,7 +2522,7 @@ clean_rx: return work_done; } -static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && (vid == adapter->mng_vlan_id)) - return; + return 0; /* add VID to filter table */ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { @@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) } set_bit(vid, adapter->active_vlans); + + return 0; } -static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) (vid == adapter->mng_vlan_id)) { /* release control to f/w */ e1000e_release_hw_control(adapter); - return; + return 0; } /* remove VID from filter table */ @@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) } clear_bit(vid, adapter->active_vlans); + + return 0; } /** @@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) } /** - * e1000_update_mc_addr_list - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program + * e1000e_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + */ +static int e1000e_write_mc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + hw->mac.ops.update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* update_mc_addr_list expects a packed array of only addresses. 
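
The netdev_completed_queue() and netdev_reset_queue() calls added to the e1000e transmit-clean and ring-clean paths above are the byte-queue-limits accounting visible in this excerpt; the matching netdev_sent_queue() call belongs in the transmit path, which this hunk does not show. A sketch of how the three calls pair up in a hypothetical single-queue foo driver:

/* transmit path: account bytes queued to the hardware ring */
static netdev_tx_t foo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	/* ... map buffers and fill descriptors ... */
	netdev_sent_queue(netdev, skb->len);
	return NETDEV_TX_OK;
}

/* completion path: report what the hardware has finished sending */
static void foo_clean_tx_irq(struct foo_adapter *adapter)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... walk completed descriptors, summing pkts_compl/bytes_compl ... */
	netdev_completed_queue(adapter->netdev, pkts_compl, bytes_compl);
}

/* ring teardown: clear BQL state along with the descriptors */
static void foo_clean_tx_ring(struct foo_adapter *adapter)
{
	/* ... free any pending buffers ... */
	netdev_reset_queue(adapter->netdev);
}
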
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +/** + * e1000e_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure * - * Updates the Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table **/ -static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count) +static int e1000e_write_uc_addr_list(struct net_device *netdev) { - hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int rar_entries = hw->mac.rar_entry_count; + int count = 0; + + /* save a rar entry for our hardware address */ + rar_entries--; + + /* save a rar entry for the LAA workaround */ + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) + rar_entries--; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + + /* + * write the addresses in reverse order to avoid write + * combining + */ + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + e1000e_rar_set(hw, ha->addr, rar_entries--); + count++; + } + } + + /* zero out the remaining RAR entries not used above */ + for (; rar_entries > 0; rar_entries--) { + ew32(RAH(rar_entries), 0); + ew32(RAL(rar_entries), 0); + } + e1e_flush(); + + return count; } /** - * e1000_set_multi - Multicast and Promiscuous mode set + * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper multicast, + * The ndo_set_rx_mode entry point is called whenever the unicast or multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, * promiscuous mode, and all-multi behavior. 
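The netdev_completed_queue() call added to e1000_clean_tx_irq() and the netdev_reset_queue() call added to e1000_clean_tx_ring() above are the driver side of byte queue limits; the matching netdev_sent_queue() call is added to e1000_xmit_frame() further down, and igb uses the per-queue netdev_tx_*_queue() variants. A condensed single-queue sketch of the pattern for a hypothetical driver (descriptor handling elided):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	unsigned int len = skb->len;

	/* ...map the skb and post descriptors to the hardware ring... */
	netdev_sent_queue(netdev, len);		/* bytes handed to hardware */
	return NETDEV_TX_OK;
}

static void foo_clean_tx_irq(struct net_device *netdev,
			     unsigned int pkts_compl, unsigned int bytes_compl)
{
	/* ...unmap and free the completed buffers... */
	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
}

static void foo_clean_tx_ring(struct net_device *netdev)
{
	/* ...free every pending buffer... */
	netdev_reset_queue(netdev);		/* BQL state must follow the ring */
}

Keeping the sent and completed byte counts balanced is what lets the core limit how much data sits queued in the Tx ring.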
**/ -static void e1000_set_multi(struct net_device *netdev) +static void e1000e_set_rx_mode(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - u8 *mta_list; u32 rctl; /* Check for Promiscuous and All Multicast modes */ - rctl = er32(RCTL); + /* clear the affected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + if (netdev->flags & IFF_PROMISC) { rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - rctl &= ~E1000_RCTL_VFE; /* Do not hardware filter VLANs in promisc mode */ e1000e_vlan_filter_disable(adapter); } else { + int count; if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; - rctl &= ~E1000_RCTL_UPE; } else { - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = e1000e_write_mc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_MPE; } e1000e_vlan_filter_enable(adapter); - } - - ew32(RCTL, rctl); - - if (!netdev_mc_empty(netdev)) { - int i = 0; - - mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); - if (!mta_list) - return; - - /* prepare a packed array of only addresses. */ - netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); - - e1000_update_mc_addr_list(hw, mta_list, i); - kfree(mta_list); - } else { /* - * if we're called from probe, we might not have - * anything to do here, so clear out the list + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode */ - e1000_update_mc_addr_list(hw, NULL, 0); + count = e1000e_write_uc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_UPE; } + ew32(RCTL, rctl); + if (netdev->features & NETIF_F_HW_VLAN_RX) e1000e_vlan_strip_enable(adapter); else @@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev) **/ static void e1000_configure(struct e1000_adapter *adapter) { - e1000_set_multi(adapter->netdev); + e1000e_set_rx_mode(adapter->netdev); e1000_restore_vlan(adapter); e1000_init_manageability_pt(adapter); @@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->state); - napi_enable(&adapter->napi); if (adapter->msix_entries) e1000_configure_msix(adapter); e1000_irq_enable(adapter); @@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter) e1e_flush(); usleep_range(10000, 20000); - napi_disable(&adapter->napi); e1000_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); @@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev) e1000_irq_enable(adapter); + adapter->tx_hang_recheck = false; netif_start_queue(netdev); adapter->idle_check = true; @@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); + napi_disable(&adapter->napi); + if (!test_bit(__E1000_DOWN, &adapter->state)) { e1000e_down(adapter); e1000_free_irq(adapter); @@ -4168,22 +4245,19 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) u32 ctrl = er32(CTRL); /* Link status message must follow this format for user tools */ - printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " - "Flow Control: %s\n", - adapter->netdev->name, - adapter->link_speed, - (adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 
- "Rx/Tx" : - ((ctrl & E1000_CTRL_RFCE) ? "Rx" : - ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); + printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : + (ctrl & E1000_CTRL_RFCE) ? "Rx" : + (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); } static bool e1000e_has_link(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - bool link_active = 0; + bool link_active = false; s32 ret_val = 0; /* @@ -4198,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) ret_val = hw->mac.ops.check_for_link(hw); link_active = !hw->mac.get_link_status; } else { - link_active = 1; + link_active = true; } break; case e1000_media_type_fiber: @@ -4297,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { - bool txb2b = 1; + bool txb2b = true; /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); @@ -4323,21 +4397,18 @@ static void e1000_watchdog_task(struct work_struct *work) e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) - e_info("Autonegotiated half duplex but" - " link partner cannot autoneg. " - " Try forcing full duplex if " - "link gets many collisions.\n"); + e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); } /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: - txb2b = 0; + txb2b = false; adapter->tx_timeout_factor = 16; break; case SPEED_100: - txb2b = 0; + txb2b = false; adapter->tx_timeout_factor = 10; break; } @@ -4473,7 +4544,7 @@ link_up: e1000e_flush_descriptors(adapter); /* Force detection of hung controller every watchdog period */ - adapter->detect_tx_hung = 1; + adapter->detect_tx_hung = true; /* * With 82571 controllers, LAA may be overwritten due to controller @@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* if count is 0 then mapping error has occurred */ count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); if (count) { + netdev_sent_queue(netdev, skb->len); e1000_tx_queue(adapter, tx_flags, count); /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); @@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) if ((adapter->hw.mac.type == e1000_pch2lan) && !(adapter->flags2 & FLAG2_CRC_STRIPPING) && (new_mtu > ETH_DATA_LEN)) { - e_err("Jumbo Frames not supported on 82579 when CRC " - "stripping is disabled.\n"); + e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); return -EINVAL; } @@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, if (wufc) { e1000_setup_rctl(adapter); - e1000_set_multi(netdev); + e1000e_set_rx_mode(netdev); /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & E1000_WUFC_MC) { @@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev) phy_data & E1000_WUS_MC ? "Multicast Packet" : phy_data & E1000_WUS_BC ? "Broadcast Packet" : phy_data & E1000_WUS_MAG ? "Magic Packet" : - phy_data & E1000_WUS_LNKC ? "Link Status " - " Change" : "other"); + phy_data & E1000_WUS_LNKC ? 
+ "Link Status Change" : "other"); } e1e_wphy(&adapter->hw, BM_WUS, ~0); } else { @@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) } } -static int e1000_set_features(struct net_device *netdev, u32 features) +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) { struct e1000_adapter *adapter = netdev_priv(netdev); - u32 changed = features ^ netdev->features; + netdev_features_t changed = features ^ netdev->features; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) adapter->flags |= FLAG_TSO_FORCE; @@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_stop = e1000_close, .ndo_start_xmit = e1000_xmit_frame, .ndo_get_stats64 = e1000e_get_stats64, - .ndo_set_rx_mode = e1000_set_multi, + .ndo_set_rx_mode = e1000e_set_rx_mode, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, .ndo_do_ioctl = e1000_ioctl, @@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } } @@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, NETIF_F_TSO6 | NETIF_F_HW_CSUM); + netdev->priv_flags |= IFF_UNICAST_FLT; + if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; @@ -6135,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* Initialize link parameters. User can change them with ethtool */ adapter->hw.mac.autoneg = 1; - adapter->fc_autoneg = 1; + adapter->fc_autoneg = true; adapter->hw.fc.requested_mode = e1000_fc_default; adapter->hw.fc.current_mode = e1000_fc_default; adapter->hw.phy.autoneg_advertised = 0x2f; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 7881fb95a25..b8e20f037d0 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -29,6 +29,8 @@ * e1000_82576 */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/types.h> #include <linux/if_ether.h> @@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) * Check for invalid size */ if ((hw->mac.type == e1000_82576) && (size > 15)) { - printk("igb: The NVM size is not valid, " - "defaulting to 32K.\n"); + pr_notice("The NVM size is not valid, defaulting to 32K\n"); size = 15; } nvm->word_size = 1 << size; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index c69feebf265..3d12e67eebb 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -447,4 +447,9 @@ static inline s32 igb_get_phy_info(struct e1000_hw *hw) return 0; } +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 43873eba2f6..7998bf4d594 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -36,6 +36,7 @@ #include <linux/ethtool.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> #include "igb.h" @@ -148,7 +149,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 
SUPPORTED_1000baseT_Full| SUPPORTED_Autoneg | SUPPORTED_TP); - ecmd->advertising = ADVERTISED_TP; + ecmd->advertising = (ADVERTISED_TP | + ADVERTISED_Pause); if (hw->mac.autoneg == 1) { ecmd->advertising |= ADVERTISED_Autoneg; @@ -165,7 +167,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE | - ADVERTISED_Autoneg); + ADVERTISED_Autoneg | + ADVERTISED_Pause); ecmd->port = PORT_FIBRE; } @@ -673,25 +676,22 @@ static void igb_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igb_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; u16 eeprom_data; - strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, igb_driver_version, - sizeof(drvinfo->version) - 1); + strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); /* EEPROM image version # is reported as firmware version # for * 82575 controllers */ adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); - sprintf(firmware_version, "%d.%d-%d", + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d-%d", (eeprom_data & 0xF000) >> 12, (eeprom_data & 0x0FF0) >> 4, eeprom_data & 0x000F); - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = IGB_STATS_LEN; drvinfo->testinfo_len = IGB_TEST_LEN; drvinfo->regdump_len = igb_get_regs_len(netdev); @@ -2162,6 +2162,19 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } +static int igb_ethtool_begin(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igb_ethtool_complete(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); +} + static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2188,6 +2201,8 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_ethtool_stats = igb_get_ethtool_stats, .get_coalesce = igb_get_coalesce, .set_coalesce = igb_set_coalesce, + .begin = igb_ethtool_begin, + .complete = igb_ethtool_complete, }; void igb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ced544499f1..01e5e89ef95 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -25,6 +25,8 @@ *******************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> @@ -51,6 +53,7 @@ #include <linux/if_ether.h> #include <linux/aer.h> #include <linux/prefetch.h> +#include <linux/pm_runtime.h> #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif @@ -145,9 +148,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); -static void igb_vlan_mode(struct net_device *netdev, u32 features); -static void 
igb_vlan_rx_add_vid(struct net_device *, u16); -static void igb_vlan_rx_kill_vid(struct net_device *, u16); +static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); +static int igb_vlan_rx_add_vid(struct net_device *, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, u16); static void igb_restore_vlan(struct igb_adapter *); static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_ping_all_vfs(struct igb_adapter *); @@ -170,8 +173,18 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter); #endif #ifdef CONFIG_PM -static int igb_suspend(struct pci_dev *, pm_message_t); -static int igb_resume(struct pci_dev *); +static int igb_suspend(struct device *); +static int igb_resume(struct device *); +#ifdef CONFIG_PM_RUNTIME +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +#endif +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; #endif static void igb_shutdown(struct pci_dev *); #ifdef CONFIG_IGB_DCA @@ -212,9 +225,7 @@ static struct pci_driver igb_driver = { .probe = igb_probe, .remove = __devexit_p(igb_remove), #ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = igb_suspend, - .resume = igb_resume, + .driver.pm = &igb_pm_ops, #endif .shutdown = igb_shutdown, .err_handler = &igb_err_handler @@ -325,16 +336,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) regs[n] = rd32(E1000_TXDCTL(n)); break; default: - printk(KERN_INFO "%-15s %08x\n", - reginfo->name, rd32(reginfo->ofs)); + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); - printk(KERN_INFO "%-15s ", rname); - for (n = 0; n < 4; n++) - printk(KERN_CONT "%08x ", regs[n]); - printk(KERN_CONT "\n"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); } /* @@ -359,18 +367,15 @@ static void igb_dump(struct igb_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - printk(KERN_INFO "Device Name state " - "trans_start last_rx\n"); - printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - netdev->trans_start, - netdev->last_rx); + pr_info("Device Name state trans_start " + "last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); - printk(KERN_INFO " Register Name Value\n"); + pr_info(" Register Name Value\n"); for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; reginfo->name; reginfo++) { igb_regdump(hw, reginfo); @@ -381,18 +386,17 @@ static void igb_dump(struct igb_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" - " leng ntw timestamp\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { struct igb_tx_buffer *buffer_info; tx_ring = adapter->tx_ring[n]; buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - 
(u64)buffer_info->time_stamp); + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); } /* Print TX Rings */ @@ -414,36 +418,38 @@ static void igb_dump(struct igb_adapter *adapter) for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "T [desc] [address 63:0 ] " - "[PlPOCIStDDM Ln] [bi->dma ] " - "leng ntw timestamp bi->skb\n"); + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " + "[bi->dma ] leng ntw timestamp " + "bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; struct igb_tx_buffer *buffer_info; tx_desc = IGB_TX_DESC(tx_ring, i); buffer_info = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; - printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" - " %04X %p %016llX %p", i, + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX" + " %04X %p %016llX %p%s\n", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (u64)buffer_info->time_stamp, - buffer_info->skb); - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC/U\n"); - else if (i == tx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); + buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", @@ -456,11 +462,11 @@ static void igb_dump(struct igb_adapter *adapter) /* Print RX Rings Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC]\n"); + pr_info("Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - printk(KERN_INFO " %5d %5X %5X\n", n, - rx_ring->next_to_use, rx_ring->next_to_clean); + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); } /* Print RX Rings */ @@ -492,36 +498,43 @@ rx_ring_summary: for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "R [desc] [ PktBuf A0] " - "[ HeadBuf DD] [bi->dma ] [bi->skb] " - "<-- Adv Rx Read format\n"); - printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " - "[vl er S cks ln] ---------------- [bi->skb] " - "<-- Adv Rx Write-Back format\n"); + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " + "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] 
[vl er S cks ln] -----" + "----------- [bi->skb] <-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; struct igb_rx_buffer *buffer_info; buffer_info = &rx_ring->rx_buffer_info[i]; rx_desc = IGB_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX ---------------- %p", i, + pr_info("%s[0x%03X] %016llX %016llX -------" + "--------- %p%s\n", "RWB", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - buffer_info->skb); + buffer_info->skb, next_desc); } else { - printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %p", i, + pr_info("%s[0x%03X] %016llX %016llX %016llX" + " %p%s\n", "R ", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, - buffer_info->skb); + buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) { print_hex_dump(KERN_INFO, "", @@ -538,14 +551,6 @@ rx_ring_summary: PAGE_SIZE/2, true); } } - - if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); - } } @@ -599,10 +604,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) static int __init igb_init_module(void) { int ret; - printk(KERN_INFO "%s - version %s\n", + pr_info("%s - version %s\n", igb_driver_string, igb_driver_version); - printk(KERN_INFO "%s\n", igb_copyright); + pr_info("%s\n", igb_copyright); #ifdef CONFIG_IGB_DCA dca_register_notify(&dca_notifier); @@ -1502,6 +1507,7 @@ void igb_power_up_link(struct igb_adapter *adapter) igb_power_up_phy_copper(&adapter->hw); else igb_power_up_serdes_link_82575(&adapter->hw); + igb_reset_phy(&adapter->hw); } /** @@ -1742,7 +1748,8 @@ void igb_reset(struct igb_adapter *adapter) igb_get_phy_info(hw); } -static u32 igb_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -1756,9 +1763,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features) return features; } -static int igb_set_features(struct net_device *netdev, u32 features) +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) { - u32 changed = netdev->features ^ features; + netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) igb_vlan_mode(netdev, features); @@ -2113,6 +2121,8 @@ static int __devinit igb_probe(struct pci_dev *pdev, default: break; } + + pm_runtime_put_noidle(&pdev->dev); return 0; err_register: @@ -2152,6 +2162,8 @@ static void __devexit igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + pm_runtime_get_noresume(&pdev->dev); + /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@ -2474,16 +2486,22 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. 
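The igb_probe()/igb_remove() hunks above add pm_runtime_put_noidle() at the end of probe and pm_runtime_get_noresume() at the start of remove, and __igb_open()/__igb_close() below bracket their work with pm_runtime_get_sync()/pm_runtime_put() when not called from the resume path. A condensed sketch of that reference discipline for a hypothetical PCI network driver:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ...register the netdev, request resources... */

	/* drop the initial reference so the device may runtime suspend */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* keep the device awake for the whole teardown */
	pm_runtime_get_noresume(&pdev->dev);
	/* ...unregister and free resources... */
}

static int foo_open(struct net_device *netdev)
{
	pm_runtime_get_sync(netdev->dev.parent);	/* resume before MMIO */
	/* ...allocate rings, request IRQs, start the queues... */
	pm_runtime_put(netdev->dev.parent);
	return 0;
}

The watchdog task then calls pm_runtime_resume() when link comes up and pm_schedule_suspend() when it goes away, which is what lets an idle, link-down port power itself down.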
**/ -static int igb_open(struct net_device *netdev) +static int __igb_open(struct net_device *netdev, bool resuming) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; int err; int i; /* disallow open during test */ - if (test_bit(__IGB_TESTING, &adapter->state)) + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); netif_carrier_off(netdev); @@ -2529,6 +2547,9 @@ static int igb_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); + if (!resuming) + pm_runtime_put(&pdev->dev); + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -2543,10 +2564,17 @@ err_setup_rx: igb_free_all_tx_resources(adapter); err_setup_tx: igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); return err; } +static int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + /** * igb_close - Disables a network interface * @netdev: network interface device structure @@ -2558,21 +2586,32 @@ err_setup_tx: * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -static int igb_close(struct net_device *netdev) +static int __igb_close(struct net_device *netdev, bool suspending) { struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); - igb_down(adapter); + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); igb_free_irq(adapter); igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); + if (!suspending) + pm_runtime_put_sync(&pdev->dev); return 0; } +static int igb_close(struct net_device *netdev) +{ + return __igb_close(netdev, false); +} + /** * igb_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -3203,6 +3242,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) buffer_info = &tx_ring->tx_buffer_info[i]; igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } + netdev_tx_reset_queue(txring_txq(tx_ring)); size = sizeof(struct igb_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); @@ -3632,6 +3672,9 @@ static void igb_watchdog_task(struct work_struct *work) link = igb_has_link(adapter); if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + if (!netif_carrier_ok(netdev)) { u32 ctrl; hw->mac.ops.get_speed_and_duplex(hw, @@ -3640,23 +3683,23 @@ static void igb_watchdog_task(struct work_struct *work) ctrl = rd32(E1000_CTRL); /* Links status message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " - "Flow Control: %s\n", + printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s " + "Duplex, Flow Control: %s\n", netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && - (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : - ((ctrl & E1000_CTRL_RFCE) ? "RX" : - ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); + "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None"); /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) { - printk(KERN_INFO "igb: %s The network adapter " - "link speed was downshifted " - "because it overheated.\n", - netdev->name); + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) { + netdev_info(netdev, "The network adapter link " + "speed was downshifted because it " + "overheated\n"); } /* adjust timeout factor according to speed/duplex */ @@ -3686,11 +3729,10 @@ static void igb_watchdog_task(struct work_struct *work) adapter->link_duplex = 0; /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) { - printk(KERN_ERR "igb: %s The network adapter " - "was stopped because it " - "overheated.\n", - netdev->name); + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + netdev_err(netdev, "The network adapter was " + "stopped because it overheated\n"); } /* Links status message must follow this format */ @@ -3704,6 +3746,9 @@ static void igb_watchdog_task(struct work_struct *work) if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); + + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); } } @@ -4241,6 +4286,8 @@ static void igb_tx_map(struct igb_ring *tx_ring, frag++; } + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + /* write last descriptor with RS and EOP bits */ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); tx_desc->read.cmd_type_len = cmd_type; @@ -5780,6 +5827,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } } + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); i += tx_ring->count; tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->tx_syncp); @@ -6138,7 +6187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; if (!page) { - page = netdev_alloc_page(rx_ring->netdev); + page = alloc_page(GFP_ATOMIC | __GFP_COLD); bi->page = page; if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; @@ -6467,7 +6516,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) return 0; } -static void igb_vlan_mode(struct net_device *netdev, u32 features) +static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6494,7 +6543,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features) igb_rlpml_set(adapter); } -static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6507,9 +6556,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) igb_vfta_set(hw, vid, true); set_bit(vid, adapter->active_vlans); + + return 0; } -static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6524,6 +6575,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) igb_vfta_set(hw, vid, false); clear_bit(vid, adapter->active_vlans); + + return 0; } static void igb_restore_vlan(struct igb_adapter *adapter) @@ -6582,13 +6635,14 @@ err_inval: return -EINVAL; } -static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) +static int 
__igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; - u32 wufc = adapter->wol; + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; #ifdef CONFIG_PM int retval = 0; #endif @@ -6596,7 +6650,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); if (netif_running(netdev)) - igb_close(netdev); + __igb_close(netdev, true); igb_clear_interrupt_scheme(adapter); @@ -6655,12 +6709,13 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) } #ifdef CONFIG_PM -static int igb_suspend(struct pci_dev *pdev, pm_message_t state) +static int igb_suspend(struct device *dev) { int retval; bool wake; + struct pci_dev *pdev = to_pci_dev(dev); - retval = __igb_shutdown(pdev, &wake); + retval = __igb_shutdown(pdev, &wake, 0); if (retval) return retval; @@ -6674,8 +6729,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int igb_resume(struct pci_dev *pdev) +static int igb_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6696,7 +6752,18 @@ static int igb_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - if (igb_init_interrupt_scheme(adapter)) { + if (!rtnl_is_locked()) { + /* + * shut up ASSERT_RTNL() warning in + * netif_set_real_num_tx/rx_queues. + */ + rtnl_lock(); + err = igb_init_interrupt_scheme(adapter); + rtnl_unlock(); + } else { + err = igb_init_interrupt_scheme(adapter); + } + if (err) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -6709,23 +6776,61 @@ static int igb_resume(struct pci_dev *pdev) wr32(E1000_WUS, ~0); - if (netif_running(netdev)) { - err = igb_open(netdev); + if (netdev->flags & IFF_UP) { + err = __igb_open(netdev, true); if (err) return err; } netif_device_attach(netdev); + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int igb_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int igb_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __igb_shutdown(pdev, &wake, 1); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } return 0; } + +static int igb_runtime_resume(struct device *dev) +{ + return igb_resume(dev); +} +#endif /* CONFIG_PM_RUNTIME */ #endif static void igb_shutdown(struct pci_dev *pdev) { bool wake; - __igb_shutdown(pdev, &wake); + __igb_shutdown(pdev, &wake, 0); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); @@ -7064,15 +7169,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) wr32(E1000_DMCTXTH, 0); /* - * DMA Coalescing high water mark needs to be higher - * than the RX threshold. set hwm to PBA - 2 * max - * frame size + * DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. 
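For reference, pba in igb_init_dmac() is the packet buffer size in KB, so the replacement formula below works in 16-byte units: hwm = 64 * pba - max_frame_size / 16 is the packet buffer minus one maximum frame, floored at 64 * (pba - 6), i.e. PBA - 6 KB. With pba = 34 and a 1522-byte frame, for example, that is 2176 - 95 = 2081 units (roughly 32.5 KB), above the 1792-unit floor; the separate dmac_thr threshold a few lines later is kept in KB directly.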
*/ - hwm = pba - (2 * adapter->max_frame_size); + hwm = 64 * pba - adapter->max_frame_size / 16; + if (hwm < 64 * (pba - 6)) + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* + * Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. + */ + dmac_thr = pba - adapter->max_frame_size / 512; + if (dmac_thr < pba - 10) + dmac_thr = pba - 10; reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; - dmac_thr = pba - 4; - reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); @@ -7088,7 +7206,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) * coalescing(smart fifb)-UTRESH=0 */ wr32(E1000_DMCRTRH, 0); - wr32(E1000_FCRTC, hwm); reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2c25858cc0f..7b600a1f636 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); - char firmware_version[32] = "N/A"; - strncpy(drvinfo->driver, igbvf_driver_name, 32); - strncpy(drvinfo->version, igbvf_driver_version, 32); - strncpy(drvinfo->fw_version, firmware_version, 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, igbvf_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->regdump_len = igbvf_get_regs_len(netdev); drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index cca78124be3..fd3da3076c2 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -25,6 +25,8 @@ *******************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> @@ -1174,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) e1000_rlpml_set_vf(hw, max_frame_size); } -static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - if (hw->mac.ops.set_vfta(hw, vid, true)) + if (hw->mac.ops.set_vfta(hw, vid, true)) { dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); - else - set_bit(vid, adapter->active_vlans); + return -EINVAL; + } + set_bit(vid, adapter->active_vlans); + return 0; } -static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -1195,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (!test_bit(__IGBVF_DOWN, &adapter->state)) igbvf_irq_enable(adapter); - if (hw->mac.ops.set_vfta(hw, vid, false)) + if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); - else - 
clear_bit(vid, adapter->active_vlans); + return -EINVAL; + } + clear_bit(vid, adapter->active_vlans); + return 0; } static void igbvf_restore_vlan(struct igbvf_adapter *adapter) @@ -1746,10 +1752,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) static void igbvf_print_link_info(struct igbvf_adapter *adapter) { - dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", - adapter->link_speed, - ((adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex")); + dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); } static bool igbvf_has_link(struct igbvf_adapter *adapter) @@ -2532,7 +2537,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); } -static int igbvf_set_features(struct net_device *netdev, u32 features) +static int igbvf_set_features(struct net_device *netdev, + netdev_features_t features) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2842,9 +2848,8 @@ static struct pci_driver igbvf_driver = { static int __init igbvf_init_module(void) { int ret; - printk(KERN_INFO "%s - version %s\n", - igbvf_driver_string, igbvf_driver_version); - printk(KERN_INFO "%s\n", igbvf_copyright); + pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); + pr_info("%s\n", igbvf_copyright); ret = pci_register_driver(&igbvf_driver); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index 9dfce7dff79..dbb7dd2f8e3 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev, { struct ixgb_adapter *adapter = netdev_priv(netdev); - strncpy(drvinfo->driver, ixgb_driver_name, 32); - strncpy(drvinfo->version, ixgb_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, ixgb_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgb_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = IXGB_STATS_LEN; drvinfo->regdump_len = ixgb_get_regs_len(netdev); drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index e21148f8b16..9bd5faf64a8 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work); static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); -static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter); #ifdef CONFIG_NET_POLL_CONTROLLER @@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter) if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { err = pci_enable_msi(adapter->pdev); if (!err) { - adapter->have_msi = 1; + adapter->have_msi = true; irq_flags = 0; } /* proceed to try to request regular interrupt */ @@ -325,8 +325,8 @@ 
ixgb_reset(struct ixgb_adapter *adapter) } } -static u32 -ixgb_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t +ixgb_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Tx VLAN insertion does not work per HW design when Rx stripping is @@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features) } static int -ixgb_set_features(struct net_device *netdev, u32 features) +ixgb_set_features(struct net_device *netdev, netdev_features_t features) { struct ixgb_adapter *adapter = netdev_priv(netdev); - u32 changed = features ^ netdev->features; + netdev_features_t changed = features ^ netdev->features; if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX))) return 0; @@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); } -static void +static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) vfta |= (1 << (vid & 0x1F)); ixgb_write_vfta(&adapter->hw, index, vfta); set_bit(vid, adapter->active_vlans); + + return 0; } -static void +static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) vfta &= ~(1 << (vid & 0x1F)); ixgb_write_vfta(&adapter->hw, index, vfta); clear_bit(vid, adapter->active_vlans); + + return 0; } static void diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index a8368d5cf68..258164d6d45 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -560,6 +560,7 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, extern char ixgbe_driver_name[]; extern const char ixgbe_driver_version[]; +extern char ixgbe_default_device_descr[]; extern void ixgbe_up(struct ixgbe_adapter *adapter); extern void ixgbe_down(struct ixgbe_adapter *adapter); @@ -627,6 +628,8 @@ extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); #endif /* CONFIG_IXGBE_DCB */ extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info); #endif /* IXGBE_FCOE */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 4ae26a748da..772072147be 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: media_type = ixgbe_media_type_fiber; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index f1365fef4ed..a3aa6333073 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -266,10 +266,10 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) if (hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); - 
hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); } return 0; @@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; - bool link_up = 0; + bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 33b93ffb87c..da31735311f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - /* Abort a bad configuration */ - if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs) - return; - if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) @@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap) - adapter->dcb_set_bitmap |= BIT_PFC; + adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG; } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, @@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - /* Abort bad configurations */ - if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs) - return; - if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) @@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } +#ifdef IXGBE_FCOE +static void ixgbe_dcbnl_devreset(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) + dev->netdev_ops->ndo_stop(dev); + + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + dev->netdev_ops->ndo_open(dev); + + clear_bit(__IXGBE_RESETTING, &adapter->state); +} +#endif + static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) if (ret) return DCB_NO_HW_CHG; -#ifdef IXGBE_FCOE - if (up && !(up & (1 << adapter->fcoe.up))) - adapter->dcb_set_bitmap |= BIT_APP_UPCHG; - - /* - * Only take down the adapter if an app change occurred. FCoE - * may shuffle tx rings in this case and this can not be done - * without a reset currently. 
- */ - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - adapter->fcoe.up = ffs(up) - 1; - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); - } -#endif - if (adapter->dcb_cfg.pfc_mode_enable) { switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: @@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } } -#ifdef IXGBE_FCOE - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - ixgbe_init_interrupt_scheme(adapter); - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - ret = DCB_HW_CHG_RST; - } -#endif - if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; @@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) if (adapter->dcb_cfg.pfc_mode_enable) adapter->hw.fc.current_mode = ixgbe_fc_pfc; - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) - clear_bit(__IXGBE_RESETTING, &adapter->state); +#ifdef IXGBE_FCOE + /* Reprogam FCoE hardware offloads when the traffic class + * FCoE is using changes. This happens if the APP info + * changes or the up2tc mapping is updated. + */ + if ((up && !(up & (1 << adapter->fcoe.up))) || + (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) { + adapter->fcoe.up = ffs(up) - 1; + ixgbe_dcbnl_devreset(netdev); + ret = DCB_HW_CHG_RST; + } +#endif + adapter->dcb_set_bitmap = 0x00; return ret; } @@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc); } -#ifdef IXGBE_FCOE -static void ixgbe_dcbnl_devreset(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (netif_running(dev)) - dev->netdev_ops->ndo_stop(dev); - - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(dev)) - dev->netdev_ops->ndo_open(dev); -} -#endif - static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { @@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { - adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX); + u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; + + adapter->dcb_set_bitmap |= mask; ixgbe_dcbnl_set_all(dev); } else { /* Drop into single TC mode strict priority as this diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 70d58c3849b..da7e580f517 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -888,23 +888,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; u32 nvm_track_id; - strncpy(drvinfo->driver, ixgbe_driver_name, - sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version) - 1); + strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbe_driver_version, + sizeof(drvinfo->version)); nvm_track_id = (adapter->eeprom_verh << 16) | adapter->eeprom_verl; - snprintf(firmware_version, sizeof(firmware_version), "0x%08x", + snprintf(drvinfo->fw_version, 
sizeof(drvinfo->fw_version), "0x%08x", nvm_track_id); - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = IXGBE_STATS_LEN; drvinfo->testinfo_len = IXGBE_TEST_LEN; drvinfo->regdump_len = ixgbe_get_regs_len(netdev); @@ -1959,12 +1955,21 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, /* WOL not supported except for the following */ switch(hw->device_id) { case IXGBE_DEV_ID_82599_SFP: - /* Only this subdevice supports WOL */ - if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) { + /* Only these subdevices could supports WOL */ + switch (hw->subsystem_device_id) { + case IXGBE_SUBDEV_ID_82599_560FLR: + /* only support first port */ + if (hw->bus.func != 0) { + wol->supported = 0; + break; + } + case IXGBE_SUBDEV_ID_82599_SFP: + retval = 0; + break; + default: wol->supported = 0; break; } - retval = 0; break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index df3b1be69d8..d18d6157dd2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -855,3 +855,86 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) } return rc; } + +/** + * ixgbe_fcoe_get_hbainfo - get FCoE HBA information + * @netdev : ixgbe adapter + * @info : HBA information + * + * Returns ixgbe HBA information + * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int i, pos; + u8 buf[8]; + + if (!info) + return -EINVAL; + + /* Don't return information on unsupported devices */ + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540) + return -EINVAL; + + /* Manufacturer */ + snprintf(info->manufacturer, sizeof(info->manufacturer), + "Intel Corporation"); + + /* Serial Number */ + + /* Get the PCI-e Device Serial Number Capability */ + pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + pos += 4; + for (i = 0; i < 8; i++) + pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); + + snprintf(info->serial_number, sizeof(info->serial_number), + "%02X%02X%02X%02X%02X%02X%02X%02X", + buf[7], buf[6], buf[5], buf[4], + buf[3], buf[2], buf[1], buf[0]); + } else + snprintf(info->serial_number, sizeof(info->serial_number), + "Unknown"); + + /* Hardware Version */ + snprintf(info->hardware_version, + sizeof(info->hardware_version), + "Rev %d", hw->revision_id); + /* Driver Name/Version */ + snprintf(info->driver_version, + sizeof(info->driver_version), + "%s v%s", + ixgbe_driver_name, + ixgbe_driver_version); + /* Firmware Version */ + snprintf(info->firmware_version, + sizeof(info->firmware_version), + "0x%08x", + (adapter->eeprom_verh << 16) | + adapter->eeprom_verl); + + /* Model */ + if (hw->mac.type == ixgbe_mac_82599EB) { + snprintf(info->model, + sizeof(info->model), + "Intel 82599"); + } else { + snprintf(info->model, + sizeof(info->model), + "Intel X540"); + } + + /* Model Description */ + snprintf(info->model_description, + sizeof(info->model_description), + "%s", + ixgbe_default_device_descr); + + return 0; +} diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 8ef92d1a6aa..1ee5d0fbb90 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -55,6 +55,8 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; +char ixgbe_default_device_descr[] = + "Intel(R) 10 Gigabit Network Connection"; #define MAJ 3 #define MIN 6 #define BUILD 7 @@ -106,6 +108,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, /* required last entry */ {0, } }; @@ -146,7 +149,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) { BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); - /* flush memory to make sure state is correct before next watchog */ + /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_clear_bit(); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); } @@ -1140,7 +1143,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) if (ring_is_ps_enabled(rx_ring)) { if (!bi->page) { - bi->page = netdev_alloc_page(rx_ring->netdev); + bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); if (!bi->page) { rx_ring->rx_stats.alloc_rx_page_failed++; goto no_buffers; @@ -2156,7 +2159,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read - * therefore no explict interrupt disable is necessary */ + * therefore no explicit interrupt disable is necessary */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); if (!eicr) { /* @@ -3044,7 +3047,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) hw->mac.ops.enable_rx_dma(hw, rxctrl); } -static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3053,9 +3056,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) /* add VID to filter table */ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); set_bit(vid, adapter->active_vlans); + + return 0; } -static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3064,6 +3069,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) /* remove VID from filter table */ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); clear_bit(vid, adapter->active_vlans); + + return 0; } /** @@ -3602,7 +3609,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) { /* - * We are assuming the worst case scenerio here, and that + * We are assuming the worst case scenario here, and that * is that an SFP was inserted/removed after the reset * but before SFP detection was enabled. 
As such the best * solution is to just start searching as soon as we start @@ -3824,7 +3831,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issuesassociated with " + "Please be aware there may be issues associated with " "your hardware. If you are experiencing problems " "please contact your Intel or hardware " "representative who provided you with this " @@ -4019,7 +4026,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* Mark all the VFs as inactive */ for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = 0; + adapter->vfinfo[i].clear_to_send = false; /* ping all the active vfs to let them know we are going down */ ixgbe_ping_all_vfs(adapter); @@ -5788,9 +5795,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) * @adapter - pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines - * in order to make certain interrupts are occuring. Secondly it sets the + * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately - * determine if a hang has occured. + * determine if a hang has occurred. */ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) { @@ -7128,7 +7135,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return -EINVAL; /* Hardware has to reinitialize queues and interrupts to - * match packet buffer alignment. Unfortunantly, the + * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. */ if (netif_running(dev)) @@ -7174,7 +7181,8 @@ void ixgbe_do_reset(struct net_device *netdev) ixgbe_reset(adapter); } -static u32 ixgbe_fix_features(struct net_device *netdev, u32 data) +static netdev_features_t ixgbe_fix_features(struct net_device *netdev, + netdev_features_t data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -7204,7 +7212,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data) return data; } -static int ixgbe_set_features(struct net_device *netdev, u32 data) +static int ixgbe_set_features(struct net_device *netdev, + netdev_features_t data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); bool need_reset = false; @@ -7286,6 +7295,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_fcoe_enable = ixgbe_fcoe_enable, .ndo_fcoe_disable = ixgbe_fcoe_disable, .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, + .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, #endif /* IXGBE_FCOE */ .ndo_set_features = ixgbe_set_features, .ndo_fix_features = ixgbe_fix_features, @@ -7598,9 +7608,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->wol = 0; switch (pdev->device) { case IXGBE_DEV_ID_82599_SFP: - /* Only this subdevice supports WOL */ - if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) + /* Only these subdevice supports WOL */ + switch (pdev->subsystem_device) { + case IXGBE_SUBDEV_ID_82599_560FLR: + /* only support first port */ + if (hw->bus.func != 0) + break; + case IXGBE_SUBDEV_ID_82599_SFP: adapter->wol = IXGBE_WUFC_MAG; + break; + } break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ @@ -7708,7 +7725,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); - 
e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); + e_dev_info("%s\n", ixgbe_default_device_descr); cards_found++; return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 9a56fd74e67..7cf1e1f56c6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u32 max_retry = 10; u32 retry = 0; u16 swfw_mask = 0; - bool nack = 1; + bool nack = true; *data = 0; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) @@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { s32 i; - bool bit = 0; + bool bit = false; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); @@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) s32 status = 0; s32 i; u32 i2cctl; - bool bit = 0; + bool bit = false; for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; @@ -1457,6 +1457,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); i2cctl |= IXGBE_I2C_DATA_OUT; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + IXGBE_WRITE_FLUSH(hw); return status; } @@ -1473,7 +1474,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) u32 i = 0; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); u32 timeout = 10; - bool ack = 1; + bool ack = true; ixgbe_raise_i2c_clk(hw, &i2cctl); @@ -1646,9 +1647,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) bool data; if (*i2cctl & IXGBE_I2C_DATA_IN) - data = 1; + data = true; else - data = 0; + data = false; return data; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 00fcd39ad66..cf6812dd143 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); + memcpy(new_mac, vf_mac, ETH_ALEN); /* * Piggyback the multicast filter type so VF can compute the * correct vectors diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index df04f1a3857..e8badab0335 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter); int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); -void ixgbe_dump_registers(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 6c5cca808bd..802bfa0f62c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -57,6 +57,7 @@ #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define 
IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 @@ -65,6 +66,7 @@ #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED @@ -1710,8 +1712,6 @@ enum { #define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ #define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ -#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 - #define IXGBE_EEPROM_PAGE_SIZE_MAX 128 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ @@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info { struct ixgbe_mac_info { struct ixgbe_mac_operations ops; enum ixgbe_mac_type type; - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; /* prefix for World Wide Node Name (WWNN) */ u16 wwnn_prefix; /* prefix for World Wide Port Name (WWPN) */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e5101e91b6b..8cc5eccfd65 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; + ixgbe_link_speed speed; + bool link_up; /* - * In order for the blink bit in the LED control register - * to work, link and speed must be forced in the MAC. We - * will reverse this when we stop the blinking. + * Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. */ - macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); - macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; - IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); - + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (link_up == false) { + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + } /* Set the LED to LINK_UP + BLINK. 
*/ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 78abb6f1a86..2eb89cb94a0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -35,7 +35,6 @@ #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 1 #define IXGBE_VF_MAX_RX_QUEUES 1 -#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 /* Link speed */ typedef u32 ixgbe_link_speed; diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index e29ba4506b7..dc8e6511c64 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -27,6 +27,8 @@ /* ethtool support for ixgbevf */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/types.h> #include <linux/module.h> #include <linux/slab.h> @@ -265,11 +267,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, ixgbevf_driver_name, 32); - strlcpy(drvinfo->version, ixgbevf_driver_version, 32); - - strlcpy(drvinfo->fw_version, "N/A", 4); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbevf_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); } static void ixgbevf_get_ringparam(struct net_device *netdev, @@ -549,8 +551,8 @@ static const u32 register_test_patterns[] = { writel((W & M), (adapter->hw.hw_addr + R)); \ val = readl(adapter->hw.hw_addr + R); \ if ((W & M) != (val & M)) { \ - printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \ - "expected 0x%08X\n", R, (val & M), (W & M)); \ + pr_err("set/check reg %04X test failed: got 0x%08X expected " \ + "0x%08X\n", R, (val & M), (W & M)); \ *data = R; \ writel(before, (adapter->hw.hw_addr + R)); \ return 1; \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 4c8e19951d5..891162d1610 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -29,6 +29,9 @@ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/types.h> #include <linux/bitops.h> #include <linux/module.h> @@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, if (!bi->page_dma && (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { if (!bi->page) { - bi->page = netdev_alloc_page(adapter->netdev); + bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); if (!bi->page) { adapter->alloc_rx_page_failed++; goto no_buffers; @@ -1400,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) } } -static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -1409,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, true); set_bit(vid, adapter->active_vlans); + + return 0; } -static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -1420,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, false); clear_bit(vid, adapter->active_vlans); + + return 0; } static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) @@ -1437,7 +1444,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) int count = 0; if ((netdev_uc_count(netdev)) > 10) { - printk(KERN_ERR "Too many unicast filters - No Space\n"); + pr_err("Too many unicast filters - No Space\n"); return -ENOSPC; } @@ -2135,7 +2142,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) err = ixgbevf_alloc_queues(adapter); if (err) { - printk(KERN_ERR "Unable to allocate memory for queues\n"); + pr_err("Unable to allocate memory for queues\n"); goto err_alloc_queues; } @@ -2189,7 +2196,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) } else { err = hw->mac.ops.init_hw(hw); if (err) { - printk(KERN_ERR "init_shared_code failed: %d\n", err); + pr_err("init_shared_code failed: %d\n", err); goto out; } } @@ -2630,8 +2637,8 @@ static int ixgbevf_open(struct net_device *netdev) * the vf can't start. 
*/ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; - printk(KERN_ERR "Unable to start - perhaps the PF" - " Driver isn't up yet\n"); + pr_err("Unable to start - perhaps the PF Driver isn't " + "up yet\n"); goto err_setup_reset; } } @@ -2842,10 +2849,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, break; default: if (unlikely(net_ratelimit())) { - printk(KERN_WARNING - "partial checksum but " - "proto=%x!\n", - skb->protocol); + pr_warn("partial checksum but " + "proto=%x!\n", skb->protocol); } break; } @@ -3249,7 +3254,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } -static int ixgbevf_set_features(struct net_device *netdev, u32 features) +static int ixgbevf_set_features(struct net_device *netdev, + netdev_features_t features) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -3414,7 +3420,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) { - printk(KERN_ERR "invalid MAC address\n"); + pr_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; } @@ -3535,10 +3541,10 @@ static struct pci_driver ixgbevf_driver = { static int __init ixgbevf_init_module(void) { int ret; - printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string, - ixgbevf_driver_version); + pr_info("%s - version %s\n", ixgbevf_driver_string, + ixgbevf_driver_version); - printk(KERN_INFO "%s\n", ixgbevf_copyright); + pr_info("%s\n", ixgbevf_copyright); ret = pci_register_driver(&ixgbevf_driver); return ret; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index ea393eb03f3..9d38a94a348 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -47,8 +47,8 @@ #define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 189200eeca2..5e4d5e5cdf3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -39,29 +39,29 @@ #define IXGBE_VTEIMC 0x0010C #define IXGBE_VTEIAC 0x00110 #define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * x)) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * x)) +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) #define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x)) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x)) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x)) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x)) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x)) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x)) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x)) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x)) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x)) +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) 
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) #define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x)) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x)) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x)) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x)) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x)) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x)) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x)) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x)) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x)) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x)) +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) #define IXGBE_VFGPRC 0x0101C #define IXGBE_VFGPTC 0x0201C #define IXGBE_VFGORC_LSB 0x01020 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index aa3682e8c47..21533e30036 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); + memcpy(hw->mac.perm_addr, addr, ETH_ALEN); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; @@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) **/ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { - memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS); + memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); return 0; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 76b84573566..27d651a80f3 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1990,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev, struct page *page, u32 page_offset, u32 len, - u8 hidma) + bool hidma) { dma_addr_t dmaaddr; @@ -2024,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) struct jme_ring *txring = &(jme->txring[0]); struct txdesc *txdesc = txring->desc, *ctxdesc; struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; - u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; + bool hidma = jme->dev->features & NETIF_F_HIGHDMA; int i, nr_frags = skb_shinfo(skb)->nr_frags; int mask = jme->tx_ring_mask; const struct skb_frag_struct *frag; @@ -2399,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev, { struct jme_adapter *jme = netdev_priv(netdev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(jme->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); } static int @@ -2727,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value) 
jme->msg_enable = value; } -static u32 -jme_fix_features(struct net_device *netdev, u32 features) +static netdev_features_t +jme_fix_features(struct net_device *netdev, netdev_features_t features) { if (netdev->mtu > 1900) features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); @@ -2736,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, u32 features) } static int -jme_set_features(struct net_device *netdev, u32 features) +jme_set_features(struct net_device *netdev, netdev_features_t features) { struct jme_adapter *jme = netdev_priv(netdev); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index d8430f487b8..6ad094f176f 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = { .remove = korina_remove, }; -static int __init korina_init_module(void) -{ - return platform_driver_register(&korina_driver); -} - -static void korina_cleanup_module(void) -{ - return platform_driver_unregister(&korina_driver); -} - -module_init(korina_init_module); -module_exit(korina_cleanup_module); +module_platform_driver(korina_driver); MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>"); MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 194a0311380..e87847e32dd 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1502,10 +1502,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void mv643xx_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); - strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, "platform", 32); + strlcpy(drvinfo->driver, mv643xx_eth_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mv643xx_eth_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); } @@ -1578,10 +1580,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) static int -mv643xx_eth_set_features(struct net_device *dev, u32 features) +mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) { struct mv643xx_eth_private *mp = netdev_priv(dev); - u32 rx_csum = features & NETIF_F_RXCSUM; + bool rx_csum = features & NETIF_F_RXCSUM; wrlp(mp, PORT_CONFIG, rx_csum ? 
0x02000000 : 0x00000000); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index d17d0624c5e..5ec409e3da0 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1645,18 +1645,7 @@ static struct platform_driver pxa168_eth_driver = { }, }; -static int __init pxa168_init_module(void) -{ - return platform_driver_register(&pxa168_eth_driver); -} - -static void __exit pxa168_cleanup_module(void) -{ - platform_driver_unregister(&pxa168_eth_driver); -} - -module_init(pxa168_init_module); -module_exit(pxa168_cleanup_module); +module_platform_driver(pxa168_eth_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index dea0cb4400e..18a87a57fc0 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev, { struct skge_port *skge = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, pci_name(skge->hw->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(skge->hw->pdev), + sizeof(info->bus_info)); } static const struct skge_stat { @@ -4042,7 +4042,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int skge_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -4104,7 +4104,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); #else #define SKGE_PM_OPS NULL -#endif +#endif /* CONFIG_PM_SLEEP */ static void skge_shutdown(struct pci_dev *pdev) { diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 7803efa46eb..760c2b17dfd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1110,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2) sky2->tx_prod = sky2->tx_cons = 0; sky2->tx_tcpsum = 0; sky2->tx_last_mss = 0; + netdev_reset_queue(sky2->netdev); le = get_tx_le(sky2, &sky2->tx_prod); le->addr = 0; @@ -1284,7 +1285,7 @@ static const uint32_t rss_init_key[10] = { }; /* Enable/disable receive hash calculation (RSS) */ -static void rx_set_rss(struct net_device *dev, u32 features) +static void rx_set_rss(struct net_device *dev, netdev_features_t features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -1402,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) -static void sky2_vlan_mode(struct net_device *dev, u32 features) +static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -1971,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, if (tx_avail(sky2) <= MAX_SKB_TX_LE) netif_stop_queue(dev); + netdev_sent_queue(dev, skb->len); sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); return NETDEV_TX_OK; @@ -2002,7 +2004,8 @@ mapping_error: static void sky2_tx_complete(struct sky2_port *sky2, u16 done) { struct net_device *dev = sky2->netdev; - unsigned idx; + u16 idx; + unsigned int bytes_compl = 0, pkts_compl = 0; 
BUG_ON(done >= sky2->tx_ring_size); @@ -2017,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) netif_printk(sky2, tx_done, KERN_DEBUG, dev, "tx done %u\n", idx); - u64_stats_update_begin(&sky2->tx_stats.syncp); - ++sky2->tx_stats.packets; - sky2->tx_stats.bytes += skb->len; - u64_stats_update_end(&sky2->tx_stats.syncp); + pkts_compl++; + bytes_compl += skb->len; re->skb = NULL; dev_kfree_skb_any(skb); @@ -2031,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) sky2->tx_cons = idx; smp_mb(); + + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + u64_stats_update_begin(&sky2->tx_stats.syncp); + sky2->tx_stats.packets += pkts_compl; + sky2->tx_stats.bytes += bytes_compl; + u64_stats_update_end(&sky2->tx_stats.syncp); } static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) @@ -3643,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev, { struct sky2_port *sky2 = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, pci_name(sky2->hw->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sky2->hw->pdev), + sizeof(info->bus_info)); } static const struct sky2_stat { @@ -4311,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); } -static u32 sky2_fix_features(struct net_device *dev, u32 features) +static netdev_features_t sky2_fix_features(struct net_device *dev, + netdev_features_t features) { const struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; @@ -4335,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features) return features; } -static int sky2_set_features(struct net_device *dev, u32 features) +static int sky2_set_features(struct net_device *dev, netdev_features_t features) { struct sky2_port *sky2 = netdev_priv(dev); - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; if (changed & NETIF_F_RXCSUM) { - u32 on = features & NETIF_F_RXCSUM; + bool on = features & NETIF_F_RXCSUM; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), on ? 
BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index d1aa45a1585..4a40ab967ee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ - mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o + mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o obj-$(CONFIG_MLX4_EN) += mlx4_en.o diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 45aea9c3ae2..915e947b422 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -48,7 +48,8 @@ static struct work_struct catas_work; static int internal_err_reset = 1; module_param(internal_err_reset, int, 0644); MODULE_PARM_DESC(internal_err_reset, - "Reset device on internal errors if non-zero (default 1)"); + "Reset device on internal errors if non-zero" + " (default 1, in SRIOV mode default is 0)"); static void dump_err_buf(struct mlx4_dev *dev) { @@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); phys_addr_t addr; + /*If we are in SRIOV the default of the module param must be 0*/ + if (mlx4_is_mfunc(dev)) + internal_err_reset = 0; + INIT_LIST_HEAD(&priv->catas_err.list); init_timer(&priv->catas_err.timer); priv->catas_err.map = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78f5a1a0b8c..978f593094c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -39,12 +39,18 @@ #include <linux/errno.h> #include <linux/mlx4/cmd.h> +#include <linux/semaphore.h> #include <asm/io.h> #include "mlx4.h" +#include "fw.h" #define CMD_POLL_TOKEN 0xffff +#define INBOX_MASK 0xffffffffffffff00ULL + +#define CMD_CHAN_VER 1 +#define CMD_CHAN_IF_REV 1 enum { /* command completed successfully: */ @@ -110,8 +116,12 @@ struct mlx4_cmd_context { int next; u64 out_param; u16 token; + u8 fw_status; }; +static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr_cmd *in_vhcr); + static int mlx4_status_to_errno(u8 status) { static const int trans_table[] = { @@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status) return trans_table[status]; } +static u8 mlx4_errno_to_status(int errno) +{ + switch (errno) { + case -EPERM: + return CMD_STAT_BAD_OP; + case -EINVAL: + return CMD_STAT_BAD_PARAM; + case -ENXIO: + return CMD_STAT_BAD_SYS_STATE; + case -EBUSY: + return CMD_STAT_RESOURCE_BUSY; + case -ENOMEM: + return CMD_STAT_EXCEED_LIM; + case -ENFILE: + return CMD_STAT_ICM_ERROR; + default: + return CMD_STAT_INTERNAL_ERR; + } +} + +static int comm_pending(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 status = readl(&priv->mfunc.comm->slave_read); + + return (swab32(status) >> 31) != priv->cmd.comm_toggle; +} + +static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 val; + + priv->cmd.comm_toggle ^= 1; + val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); + __raw_writel((__force u32) cpu_to_be32(val), + &priv->mfunc.comm->slave_write); + mmiowb(); +} + +static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, + unsigned long timeout) +{ + struct mlx4_priv *priv = 
mlx4_priv(dev); + unsigned long end; + int err = 0; + int ret_from_pending = 0; + + /* First, verify that the master reports correct status */ + if (comm_pending(dev)) { + mlx4_warn(dev, "Communication channel is not idle." + "my toggle is %d (cmd:0x%x)\n", + priv->cmd.comm_toggle, cmd); + return -EAGAIN; + } + + /* Write command */ + down(&priv->cmd.poll_sem); + mlx4_comm_cmd_post(dev, cmd, param); + + end = msecs_to_jiffies(timeout) + jiffies; + while (comm_pending(dev) && time_before(jiffies, end)) + cond_resched(); + ret_from_pending = comm_pending(dev); + if (ret_from_pending) { + /* check if the slave is trying to boot in the middle of + * FLR process. The only non-zero result in the RESET command + * is MLX4_DELAY_RESET_SLAVE*/ + if ((MLX4_COMM_CMD_RESET == cmd)) { + mlx4_warn(dev, "Got slave FLRed from Communication" + " channel (ret:0x%x)\n", ret_from_pending); + err = MLX4_DELAY_RESET_SLAVE; + } else { + mlx4_warn(dev, "Communication channel timed out\n"); + err = -ETIMEDOUT; + } + } + + up(&priv->cmd.poll_sem); + return err; +} + +static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, + u16 param, unsigned long timeout) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_context *context; + int err = 0; + + down(&cmd->event_sem); + + spin_lock(&cmd->context_lock); + BUG_ON(cmd->free_head < 0); + context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; + cmd->free_head = context->next; + spin_unlock(&cmd->context_lock); + + init_completion(&context->done); + + mlx4_comm_cmd_post(dev, op, param); + + if (!wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout))) { + err = -EBUSY; + goto out; + } + + err = context->result; + if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + goto out; + } + +out: + spin_lock(&cmd->context_lock); + context->next = cmd->free_head; + cmd->free_head = context - cmd->context; + spin_unlock(&cmd->context_lock); + + up(&cmd->event_sem); + return err; +} + +int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, + unsigned long timeout) +{ + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_comm_cmd_wait(dev, cmd, param, timeout); + return mlx4_comm_cmd_poll(dev, cmd, param, timeout); +} + static int cmd_pending(struct mlx4_dev *dev) { u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); @@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); while (cmd_pending(dev)) { - if (time_after_eq(jiffies, end)) + if (time_after_eq(jiffies, end)) { + mlx4_err(dev, "%s:cmd_pending failed\n", __func__); goto out; + } cond_resched(); } @@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, (cmd->toggle << HCR_T_BIT) | (event ? (1 << HCR_E_BIT) : 0) | (op_modifier << HCR_OPMOD_SHIFT) | - op), hcr + 6); + op), hcr + 6); /* * Make sure that our HCR writes don't get mixed in with @@ -209,6 +354,62 @@ out: return ret; } +static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr; + int ret; + + down(&priv->cmd.slave_sem); + vhcr->in_param = cpu_to_be64(in_param); + vhcr->out_param = out_param ? 
cpu_to_be64(*out_param) : 0; + vhcr->in_modifier = cpu_to_be32(in_modifier); + vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff)); + vhcr->token = cpu_to_be16(CMD_POLL_TOKEN); + vhcr->status = 0; + vhcr->flags = !!(priv->cmd.use_events) << 6; + if (mlx4_is_master(dev)) { + ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr); + if (!ret) { + if (out_is_imm) { + if (out_param) + *out_param = + be64_to_cpu(vhcr->out_param); + else { + mlx4_err(dev, "response expected while" + "output mailbox is NULL for " + "command 0x%x\n", op); + vhcr->status = CMD_STAT_BAD_PARAM; + } + } + ret = mlx4_status_to_errno(vhcr->status); + } + } else { + ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, + MLX4_COMM_TIME + timeout); + if (!ret) { + if (out_is_imm) { + if (out_param) + *out_param = + be64_to_cpu(vhcr->out_param); + else { + mlx4_err(dev, "response expected while" + "output mailbox is NULL for " + "command 0x%x\n", op); + vhcr->status = CMD_STAT_BAD_PARAM; + } + } + ret = mlx4_status_to_errno(vhcr->status); + } else + mlx4_err(dev, "failed execution of VHCR_POST command" + "opcode 0x%x\n", op); + } + up(&priv->cmd.slave_sem); + return ret; +} + static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) @@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, void __iomem *hcr = priv->cmd.hcr; int err = 0; unsigned long end; + u32 stat; down(&priv->cmd.poll_sem); @@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | (u64) be32_to_cpu((__force __be32) __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); - - err = mlx4_status_to_errno(be32_to_cpu((__force __be32) - __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24); + stat = be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; + err = mlx4_status_to_errno(stat); + if (err) + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, stat); out: up(&priv->cmd.poll_sem); @@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) if (token != context->token) return; + context->fw_status = status; context->result = mlx4_status_to_errno(status); context->out_param = out_param; @@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, mlx4_cmd_post(dev, in_param, out_param ? 
*out_param : 0, in_modifier, op_modifier, op, context->token, 1); - if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { + if (!wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout))) { err = -EBUSY; goto out; } err = context->result; - if (err) + if (err) { + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); goto out; + } if (out_is_imm) *out_param = context->out_param; @@ -311,17 +521,1046 @@ out: int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, - u16 op, unsigned long timeout) + u16 op, unsigned long timeout, int native) { - if (mlx4_priv(dev)->cmd.use_events) - return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, - in_modifier, op_modifier, op, timeout); - else - return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm, - in_modifier, op_modifier, op, timeout); + if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_cmd_wait(dev, in_param, out_param, + out_is_imm, in_modifier, + op_modifier, op, timeout); + else + return mlx4_cmd_poll(dev, in_param, out_param, + out_is_imm, in_modifier, + op_modifier, op, timeout); + } + return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); } EXPORT_SYMBOL_GPL(__mlx4_cmd); + +static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, + int slave, u64 slave_addr, + int size, int is_read) +{ + u64 in_param; + u64 out_param; + + if ((slave_addr & 0xfff) | (master_addr & 0xfff) | + (slave & ~0x7f) | (size & 0xff)) { + mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " + "master_addr:0x%llx slave_id:%d size:%d\n", + slave_addr, master_addr, slave, size); + return -EINVAL; + } + + if (is_read) { + in_param = (u64) slave | slave_addr; + out_param = (u64) dev->caps.function | master_addr; + } else { + in_param = (u64) dev->caps.function | master_addr; + out_param = (u64) slave | slave_addr; + } + + return mlx4_cmd_imm(dev, in_param, &out_param, size, 0, + MLX4_CMD_ACCESS_MEM, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u64 in_param; + u64 out_param; + int err; + + in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; + out_param = cmd->has_outbox ? 
(u64) outbox->dma : vhcr->out_param; + if (cmd->encode_slave_id) { + in_param &= 0xffffffffffffff00ll; + in_param |= slave; + } + + err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, + vhcr->in_modifier, vhcr->op_modifier, vhcr->op, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (cmd->out_is_imm) + vhcr->out_param = out_param; + + return err; +} + +static struct mlx4_cmd_info cmd_info[] = { + { + .opcode = MLX4_CMD_QUERY_FW, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_QUERY_HCA, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_QUERY_DEV_CAP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_QUERY_FUNC_CAP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_FUNC_CAP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_ADAPTER, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_INIT_PORT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT_PORT_wrapper + }, + { + .opcode = MLX4_CMD_CLOSE_PORT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CLOSE_PORT_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_PORT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_PORT_wrapper + }, + { + .opcode = MLX4_CMD_SET_PORT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_PORT_wrapper + }, + { + .opcode = MLX4_CMD_MAP_EQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MAP_EQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_EQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_EQ_wrapper + }, + { + .opcode = MLX4_CMD_HW_HEALTH_CHECK, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_NOP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_ALLOC_RES, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ALLOC_RES_wrapper + }, + { + .opcode = MLX4_CMD_FREE_RES, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_FREE_RES_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_MPT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_MPT_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_MPT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_MPT_wrapper + }, + { + 
.opcode = MLX4_CMD_HW2SW_MPT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_MPT_wrapper + }, + { + .opcode = MLX4_CMD_READ_MTT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_WRITE_MTT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_WRITE_MTT_wrapper + }, + { + .opcode = MLX4_CMD_SYNC_TPT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_HW2SW_EQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_HW2SW_EQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_EQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_QUERY_EQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_CQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_CQ_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_CQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_CQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_CQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_CQ_wrapper + }, + { + .opcode = MLX4_CMD_MODIFY_CQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MODIFY_CQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_SRQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_SRQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_SRQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_ARM_SRQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ARM_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_RST2INIT_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_RST2INIT_QP_wrapper + }, + { + .opcode = MLX4_CMD_INIT2INIT_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_INIT2RTR_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT2RTR_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTR2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTS2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, 
+ { + .opcode = MLX4_CMD_SQERR2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_2ERR_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTS2SQD_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQD2SQD_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQD2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_2RST_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_2RST_QP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_QP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SUSPEND_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_UNSUSPEND_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_IF_STAT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_IF_STAT_wrapper + }, + /* Native multicast commands are not available for guests */ + { + .opcode = MLX4_CMD_QP_ATTACH, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_ATTACH_wrapper + }, + { + .opcode = MLX4_CMD_PROMISC, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_PROMISC_wrapper + }, + /* Ethernet specific commands */ + { + .opcode = MLX4_CMD_SET_VLAN_FLTR, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_VLAN_FLTR_wrapper + }, + { + .opcode = MLX4_CMD_SET_MCAST_FLTR, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_MCAST_FLTR_wrapper + }, + { + .opcode = MLX4_CMD_DUMP_ETH_STATS, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_DUMP_ETH_STATS_wrapper + }, + { + .opcode = MLX4_CMD_INFORM_FLR_DONE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, +}; + +static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr_cmd *in_vhcr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_info *cmd = NULL; + struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? 
in_vhcr : priv->mfunc.vhcr; + struct mlx4_vhcr *vhcr; + struct mlx4_cmd_mailbox *inbox = NULL; + struct mlx4_cmd_mailbox *outbox = NULL; + u64 in_param; + u64 out_param; + int ret = 0; + int i; + int err = 0; + + /* Create sw representation of Virtual HCR */ + vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL); + if (!vhcr) + return -ENOMEM; + + /* DMA in the vHCR */ + if (!in_vhcr) { + ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, + priv->mfunc.master.slave_state[slave].vhcr_dma, + ALIGN(sizeof(struct mlx4_vhcr_cmd), + MLX4_ACCESS_MEM_ALIGN), 1); + if (ret) { + mlx4_err(dev, "%s:Failed reading vhcr" + "ret: 0x%x\n", __func__, ret); + kfree(vhcr); + return ret; + } + } + + /* Fill SW VHCR fields */ + vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); + vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); + vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier); + vhcr->token = be16_to_cpu(vhcr_cmd->token); + vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff; + vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12); + vhcr->e_bit = vhcr_cmd->flags & (1 << 6); + + /* Lookup command */ + for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) { + if (vhcr->op == cmd_info[i].opcode) { + cmd = &cmd_info[i]; + break; + } + } + if (!cmd) { + mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n", + vhcr->op, slave); + vhcr_cmd->status = CMD_STAT_BAD_PARAM; + goto out_status; + } + + /* Read inbox */ + if (cmd->has_inbox) { + vhcr->in_param &= INBOX_MASK; + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) { + vhcr_cmd->status = CMD_STAT_BAD_SIZE; + inbox = NULL; + goto out_status; + } + + if (mlx4_ACCESS_MEM(dev, inbox->dma, slave, + vhcr->in_param, + MLX4_MAILBOX_SIZE, 1)) { + mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", + __func__, cmd->opcode); + vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; + goto out_status; + } + } + + /* Apply permission and bound checks if applicable */ + if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { + mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " + "checks for resource_id:%d\n", vhcr->op, slave, + vhcr->in_modifier); + vhcr_cmd->status = CMD_STAT_BAD_OP; + goto out_status; + } + + /* Allocate outbox */ + if (cmd->has_outbox) { + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + vhcr_cmd->status = CMD_STAT_BAD_SIZE; + outbox = NULL; + goto out_status; + } + } + + /* Execute the command! */ + if (cmd->wrapper) { + err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, + cmd); + if (cmd->out_is_imm) + vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); + } else { + in_param = cmd->has_inbox ? (u64) inbox->dma : + vhcr->in_param; + out_param = cmd->has_outbox ? 
(u64) outbox->dma : + vhcr->out_param; + err = __mlx4_cmd(dev, in_param, &out_param, + cmd->out_is_imm, vhcr->in_modifier, + vhcr->op_modifier, vhcr->op, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (cmd->out_is_imm) { + vhcr->out_param = out_param; + vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); + } + } + + if (err) { + mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" + " error:%d, status %d\n", + vhcr->op, slave, vhcr->errno, err); + vhcr_cmd->status = mlx4_errno_to_status(err); + goto out_status; + } + + + /* Write outbox if command completed successfully */ + if (cmd->has_outbox && !vhcr_cmd->status) { + ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave, + vhcr->out_param, + MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED); + if (ret) { + /* If we failed to write back the outbox after the + *command was successfully executed, we must fail this + * slave, as it is now in undefined state */ + mlx4_err(dev, "%s:Failed writing outbox\n", __func__); + goto out; + } + } + +out_status: + /* DMA back vhcr result */ + if (!in_vhcr) { + ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, + priv->mfunc.master.slave_state[slave].vhcr_dma, + ALIGN(sizeof(struct mlx4_vhcr), + MLX4_ACCESS_MEM_ALIGN), + MLX4_CMD_WRAPPED); + if (ret) + mlx4_err(dev, "%s:Failed writing vhcr result\n", + __func__); + else if (vhcr->e_bit && + mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) + mlx4_warn(dev, "Failed to generate command completion " + "eqe for slave %d\n", slave); + } + +out: + kfree(vhcr); + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} + +static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, + u16 param, u8 toggle) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + u32 reply; + u32 slave_status = 0; + u8 is_going_down = 0; + + slave_state[slave].comm_toggle ^= 1; + reply = (u32) slave_state[slave].comm_toggle << 31; + if (toggle != slave_state[slave].comm_toggle) { + mlx4_warn(dev, "Incorrect toggle %d from slave %d. 
*** MASTER" + "STATE COMPROMISIED ***\n", toggle, slave); + goto reset_slave; + } + if (cmd == MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "Received reset from slave:%d\n", slave); + slave_state[slave].active = false; + /*check if we are in the middle of FLR process, + if so return "retry" status to the slave*/ + if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { + slave_status = MLX4_DELAY_RESET_SLAVE; + goto inform_slave_state; + } + + /* write the version in the event field */ + reply |= mlx4_comm_get_version(); + + goto reset_slave; + } + /*command from slave in the middle of FLR*/ + if (cmd != MLX4_COMM_CMD_RESET && + MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { + mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " + "in the middle of FLR\n", slave, cmd); + return; + } + + switch (cmd) { + case MLX4_COMM_CMD_VHCR0: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) + goto reset_slave; + slave_state[slave].vhcr_dma = ((u64) param) << 48; + priv->mfunc.master.slave_state[slave].cookie = 0; + mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]); + break; + case MLX4_COMM_CMD_VHCR1: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) + goto reset_slave; + slave_state[slave].vhcr_dma |= ((u64) param) << 32; + break; + case MLX4_COMM_CMD_VHCR2: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) + goto reset_slave; + slave_state[slave].vhcr_dma |= ((u64) param) << 16; + break; + case MLX4_COMM_CMD_VHCR_EN: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) + goto reset_slave; + slave_state[slave].vhcr_dma |= param; + slave_state[slave].active = true; + break; + case MLX4_COMM_CMD_VHCR_POST: + if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && + (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) + goto reset_slave; + down(&priv->cmd.slave_sem); + if (mlx4_master_process_vhcr(dev, slave, NULL)) { + mlx4_err(dev, "Failed processing vhcr for slave:%d," + " reseting slave.\n", slave); + up(&priv->cmd.slave_sem); + goto reset_slave; + } + up(&priv->cmd.slave_sem); + break; + default: + mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave); + goto reset_slave; + } + spin_lock(&priv->mfunc.master.slave_state_lock); + if (!slave_state[slave].is_slave_going_down) + slave_state[slave].last_cmd = cmd; + else + is_going_down = 1; + spin_unlock(&priv->mfunc.master.slave_state_lock); + if (is_going_down) { + mlx4_warn(dev, "Slave is going down aborting command(%d)" + " executing from slave:%d\n", + cmd, slave); + return; + } + __raw_writel((__force u32) cpu_to_be32(reply), + &priv->mfunc.comm[slave].slave_read); + mmiowb(); + + return; + +reset_slave: + /* cleanup any slave resources */ + mlx4_delete_all_resources_for_slave(dev, slave); + spin_lock(&priv->mfunc.master.slave_state_lock); + if (!slave_state[slave].is_slave_going_down) + slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; + spin_unlock(&priv->mfunc.master.slave_state_lock); + /*with slave in the middle of flr, no need to clean resources again.*/ +inform_slave_state: + memset(&slave_state[slave].event_eq, 0, + sizeof(struct mlx4_slave_event_eq_info)); + __raw_writel((__force u32) cpu_to_be32(reply), + &priv->mfunc.comm[slave].slave_read); + wmb(); +} + +/* master command processing */ +void mlx4_master_comm_channel(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, + struct mlx4_mfunc_master_ctx, + comm_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = + container_of(mfunc, struct 
mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + __be32 *bit_vec; + u32 comm_cmd; + u32 vec; + int i, j, slave; + int toggle; + int served = 0; + int reported = 0; + u32 slt; + + bit_vec = master->comm_arm_bit_vector; + for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) { + vec = be32_to_cpu(bit_vec[i]); + for (j = 0; j < 32; j++) { + if (!(vec & (1 << j))) + continue; + ++reported; + slave = (i * 32) + j; + comm_cmd = swab32(readl( + &mfunc->comm[slave].slave_write)); + slt = swab32(readl(&mfunc->comm[slave].slave_read)) + >> 31; + toggle = comm_cmd >> 31; + if (toggle != slt) { + if (master->slave_state[slave].comm_toggle + != slt) { + printk(KERN_INFO "slave %d out of sync." + " read toggle %d, state toggle %d. " + "Resynching.\n", slave, slt, + master->slave_state[slave].comm_toggle); + master->slave_state[slave].comm_toggle = + slt; + } + mlx4_master_do_cmd(dev, slave, + comm_cmd >> 16 & 0xff, + comm_cmd & 0xffff, toggle); + ++served; + } + } + } + + if (reported && reported != served) + mlx4_warn(dev, "Got command event with bitmask from %d slaves" + " but %d were served\n", + reported, served); + + if (mlx4_ARM_COMM_CHANNEL(dev)) + mlx4_warn(dev, "Failed to arm comm channel events\n"); +} + +static int sync_toggles(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int wr_toggle; + int rd_toggle; + unsigned long end; + + wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31; + end = jiffies + msecs_to_jiffies(5000); + + while (time_before(jiffies, end)) { + rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31; + if (rd_toggle == wr_toggle) { + priv->cmd.comm_toggle = rd_toggle; + return 0; + } + + cond_resched(); + } + + /* + * we could reach here if for example the previous VM using this + * function misbehaved and left the channel with unsynced state. 
We + * should fix this here and give this VM a chance to use a properly + * synced channel + */ + mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); + __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); + __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); + priv->cmd.comm_toggle = 0; + + return 0; +} + +int mlx4_multi_func_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state; + int i, err, port; + + priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, + &priv->mfunc.vhcr_dma, + GFP_KERNEL); + if (!priv->mfunc.vhcr) { + mlx4_err(dev, "Couldn't allocate vhcr.\n"); + return -ENOMEM; + } + + if (mlx4_is_master(dev)) + priv->mfunc.comm = + ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + + priv->fw.comm_base, MLX4_COMM_PAGESIZE); + else + priv->mfunc.comm = + ioremap(pci_resource_start(dev->pdev, 2) + + MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); + if (!priv->mfunc.comm) { + mlx4_err(dev, "Couldn't map communication vector.\n"); + goto err_vhcr; + } + + if (mlx4_is_master(dev)) { + priv->mfunc.master.slave_state = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_slave_state), GFP_KERNEL); + if (!priv->mfunc.master.slave_state) + goto err_comm; + + for (i = 0; i < dev->num_slaves; ++i) { + s_state = &priv->mfunc.master.slave_state[i]; + s_state->last_cmd = MLX4_COMM_CMD_RESET; + __raw_writel((__force u32) 0, + &priv->mfunc.comm[i].slave_write); + __raw_writel((__force u32) 0, + &priv->mfunc.comm[i].slave_read); + mmiowb(); + for (port = 1; port <= MLX4_MAX_PORTS; port++) { + s_state->vlan_filter[port] = + kzalloc(sizeof(struct mlx4_vlan_fltr), + GFP_KERNEL); + if (!s_state->vlan_filter[port]) { + if (--port) + kfree(s_state->vlan_filter[port]); + goto err_slaves; + } + INIT_LIST_HEAD(&s_state->mcast_filters[port]); + } + spin_lock_init(&s_state->lock); + } + + memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe)); + priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; + INIT_WORK(&priv->mfunc.master.comm_work, + mlx4_master_comm_channel); + INIT_WORK(&priv->mfunc.master.slave_event_work, + mlx4_gen_slave_eqe); + INIT_WORK(&priv->mfunc.master.slave_flr_event_work, + mlx4_master_handle_slave_flr); + spin_lock_init(&priv->mfunc.master.slave_state_lock); + priv->mfunc.master.comm_wq = + create_singlethread_workqueue("mlx4_comm"); + if (!priv->mfunc.master.comm_wq) + goto err_slaves; + + if (mlx4_init_resource_tracker(dev)) + goto err_thread; + + sema_init(&priv->cmd.slave_sem, 1); + err = mlx4_ARM_COMM_CHANNEL(dev); + if (err) { + mlx4_err(dev, " Failed to arm comm channel eq: %x\n", + err); + goto err_resource; + } + + } else { + err = sync_toggles(dev); + if (err) { + mlx4_err(dev, "Couldn't sync toggles\n"); + goto err_comm; + } + + sema_init(&priv->cmd.slave_sem, 1); + } + return 0; + +err_resource: + mlx4_free_resource_tracker(dev); +err_thread: + flush_workqueue(priv->mfunc.master.comm_wq); + destroy_workqueue(priv->mfunc.master.comm_wq); +err_slaves: + while (--i) { + for (port = 1; port <= MLX4_MAX_PORTS; port++) + kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); + } + kfree(priv->mfunc.master.slave_state); +err_comm: + iounmap(priv->mfunc.comm); +err_vhcr: + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + priv->mfunc.vhcr, + priv->mfunc.vhcr_dma); + priv->mfunc.vhcr = NULL; + return -ENOMEM; +} + int mlx4_cmd_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -331,22 +1570,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev) 
priv->cmd.use_events = 0; priv->cmd.toggle = 1; - priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, - MLX4_HCR_SIZE); - if (!priv->cmd.hcr) { - mlx4_err(dev, "Couldn't map command register."); - return -ENOMEM; + priv->cmd.hcr = NULL; + priv->mfunc.vhcr = NULL; + + if (!mlx4_is_slave(dev)) { + priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + + MLX4_HCR_BASE, MLX4_HCR_SIZE); + if (!priv->cmd.hcr) { + mlx4_err(dev, "Couldn't map command register.\n"); + return -ENOMEM; + } } priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, MLX4_MAILBOX_SIZE, MLX4_MAILBOX_SIZE, 0); - if (!priv->cmd.pool) { - iounmap(priv->cmd.hcr); - return -ENOMEM; - } + if (!priv->cmd.pool) + goto err_hcr; return 0; + +err_hcr: + if (!mlx4_is_slave(dev)) + iounmap(priv->cmd.hcr); + return -ENOMEM; +} + +void mlx4_multi_func_cleanup(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, port; + + if (mlx4_is_master(dev)) { + flush_workqueue(priv->mfunc.master.comm_wq); + destroy_workqueue(priv->mfunc.master.comm_wq); + for (i = 0; i < dev->num_slaves; i++) { + for (port = 1; port <= MLX4_MAX_PORTS; port++) + kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); + } + kfree(priv->mfunc.master.slave_state); + iounmap(priv->mfunc.comm); + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + priv->mfunc.vhcr, + priv->mfunc.vhcr_dma); + priv->mfunc.vhcr = NULL; + } } void mlx4_cmd_cleanup(struct mlx4_dev *dev) @@ -354,7 +1622,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); pci_pool_destroy(priv->cmd.pool); - iounmap(priv->cmd.hcr); + + if (!mlx4_is_slave(dev)) + iounmap(priv->cmd.hcr); } /* @@ -365,6 +1635,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; + int err = 0; priv->cmd.context = kmalloc(priv->cmd.max_cmds * sizeof (struct mlx4_cmd_context), @@ -389,11 +1660,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) ; /* nothing */ --priv->cmd.token_mask; - priv->cmd.use_events = 1; - down(&priv->cmd.poll_sem); + priv->cmd.use_events = 1; - return 0; + return err; } /* @@ -433,7 +1703,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) } EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); -void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox) { if (!mailbox) return; @@ -442,3 +1713,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo kfree(mailbox); } EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); + +u32 mlx4_comm_get_version(void) +{ + return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 499a5168892..475f9d6af95 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -34,9 +34,9 @@ * SOFTWARE. 
*/ +#include <linux/init.h> #include <linux/hardirq.h> #include <linux/export.h> -#include <linux/gfp.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/cq.h> @@ -44,27 +44,6 @@ #include "mlx4.h" #include "icm.h" -struct mlx4_cq_context { - __be32 flags; - u16 reserved1[3]; - __be16 page_offset; - __be32 logsize_usrpage; - __be16 cq_period; - __be16 cq_max_count; - u8 reserved2[3]; - u8 comp_eqn; - u8 log_page_size; - u8 reserved3[2]; - u8 mtt_base_addr_h; - __be32 mtt_base_addr_l; - __be32 last_notified_index; - __be32 solicit_producer_index; - __be32 consumer_index; - __be32 producer_index; - u32 reserved4[2]; - __be64 db_rec_addr; -}; - #define MLX4_CQ_STATUS_OK ( 0 << 28) #define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) #define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28) @@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, cqn & (dev->caps.num_cqs - 1)); if (!cq) { - mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn); + mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); return; } @@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num) { - return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ, - MLX4_CMD_TIME_CLASS_A); + return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0, + MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); } static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num, u32 opmod) { return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num) { - return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num, - mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, - MLX4_CMD_TIME_CLASS_A); + return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0, + cq_num, mailbox ? 
0 : 1, MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, @@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, } EXPORT_SYMBOL_GPL(mlx4_cq_resize); +int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + int err; + + *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); + if (*cqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &cq_table->table, *cqn); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); + if (err) + goto err_put; + return 0; + +err_put: + mlx4_table_put(dev, &cq_table->table, *cqn); + +err_out: + mlx4_bitmap_free(&cq_table->bitmap, *cqn); + return err; +} + +static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + else { + *cqn = get_param_l(&out_param); + return 0; + } + } + return __mlx4_cq_alloc_icm(dev, cqn); +} + +void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + + mlx4_table_put(dev, &cq_table->cmpt_table, cqn); + mlx4_table_put(dev, &cq_table->table, cqn); + mlx4_bitmap_free(&cq_table->bitmap, cqn); +} + +static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) +{ + u64 in_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, cqn); + err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed freeing cq:%d\n", cqn); + } else + __mlx4_cq_free_icm(dev, cqn); +} + int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed) @@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, cq->vector = vector; - cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); - if (cq->cqn == -1) - return -ENOMEM; - - err = mlx4_table_get(dev, &cq_table->table, cq->cqn); - if (err) - goto err_out; - - err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn); + err = mlx4_cq_alloc_icm(dev, &cq->cqn); if (err) - goto err_put; + return err; spin_lock_irq(&cq_table->lock); err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); spin_unlock_irq(&cq_table->lock); if (err) - goto err_cmpt_put; + goto err_icm; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { @@ -259,14 +303,8 @@ err_radix: radix_tree_delete(&cq_table->tree, cq->cqn); spin_unlock_irq(&cq_table->lock); -err_cmpt_put: - mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn); - -err_put: - mlx4_table_put(dev, &cq_table->table, cq->cqn); - -err_out: - mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); +err_icm: + mlx4_cq_free_icm(dev, cq->cqn); return err; } @@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) complete(&cq->free); wait_for_completion(&cq->free); - mlx4_table_put(dev, &cq_table->table, cq->cqn); - mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); + mlx4_cq_free_icm(dev, cq->cqn); } EXPORT_SYMBOL_GPL(mlx4_cq_free); @@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev) spin_lock_init(&cq_table->lock); INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) 
+ return 0; err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); @@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev) void mlx4_cleanup_cq_table(struct mlx4_dev *dev) { + if (mlx4_is_slave(dev)) + return; /* Nothing to do to clean up radix_tree */ mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 5829e0b47e7..00b81272e31 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, int err; cq->size = entries; - if (mode == RX) - cq->buf_size = cq->size * sizeof(struct mlx4_cqe); - else - cq->buf_size = sizeof(struct mlx4_cqe); + cq->buf_size = cq->size * sizeof(struct mlx4_cqe); cq->ring = ring; cq->is_tx = mode; @@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq->size = priv->rx_ring[cq->ring].actual_size; err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, - cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx); + cq->wqres.db.dma, &cq->mcq, cq->vector, 0); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 74e2a2a8a02..7dbc6a23077 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - strncpy(drvinfo->driver, DRV_NAME, 32); - strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); - sprintf(drvinfo->fw_version, "%d.%d.%d", + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", (u16) (mdev->dev->caps.fw_ver >> 32), (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), (u16) (mdev->dev->caps.fw_ver & 0xffff)); - strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32); + strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; @@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev, struct mlx4_en_priv *priv = netdev_priv(netdev); int err = 0; u64 config = 0; + u64 mask; - if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) { + if ((priv->port < 1) || (priv->port > 2)) { + en_err(priv, "Failed to get WoL information\n"); + return; + } + + mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : + MLX4_DEV_CAP_FLAG_WOL_PORT2; + + if (!(priv->mdev->dev->caps.flags & mask)) { wol->supported = 0; wol->wolopts = 0; return; @@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev, struct mlx4_en_priv *priv = netdev_priv(netdev); u64 config = 0; int err = 0; + u64 mask; + + if ((priv->port < 1) || (priv->port > 2)) + return -EOPNOTSUPP; + + mask = (priv->port == 1) ? 
MLX4_DEV_CAP_FLAG_WOL_PORT1 : + MLX4_DEV_CAP_FLAG_WOL_PORT2; - if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) + if (!(priv->mdev->dev->caps.flags & mask)) return -EOPNOTSUPP; if (wol->supported & ~WAKE_MAGIC) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 78d776bc355..72fa807b69c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -45,7 +45,7 @@ #include "mlx4_en.h" #include "en_port.h" -static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) en_err(priv, "failed adding vlan %d\n", vid); mutex_unlock(&mdev->state_lock); + return 0; } -static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) en_err(priv, "Failed configuring VLAN filter\n"); } mutex_unlock(&mdev->state_lock); + + return 0; } u64 mlx4_en_mac_to_u64(u8 *addr) @@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work) if (priv->port_up) { /* Remove old MAC and insert the new one */ err = mlx4_replace_mac(mdev->dev, priv->port, - priv->base_qpn, priv->mac, 0); + priv->base_qpn, priv->mac); if (err) en_err(priv, "Failed changing HW MAC address\n"); } else @@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev) struct mlx4_en_priv *priv = netdev_priv(dev); kfree(priv->mc_addrs); + priv->mc_addrs = NULL; priv->mc_addrs_cnt = 0; } @@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev) i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); + mlx4_en_clear_list(dev); priv->mc_addrs = mc_addrs; priv->mc_addrs_cnt = mc_addrs_cnt; } @@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) goto out; } + if (!netif_carrier_ok(dev)) { + if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { + if (priv->port_state.link_state) { + priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; + netif_carrier_on(dev); + en_dbg(LINK, priv, "Link Up\n"); + } + } + } + /* * Promsicuous mode: disable all filters */ @@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev) ++rx_index; } - /* Set port mac number */ - en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); - err = mlx4_register_mac(mdev->dev, priv->port, - priv->mac, &priv->base_qpn, 0); + /* Set qp number */ + en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); + err = mlx4_get_eth_qp(mdev->dev, priv->port, + priv->mac, &priv->base_qpn); if (err) { - en_err(priv, "Failed setting port mac\n"); + en_err(priv, "Failed getting eth qp\n"); goto cq_err; } mdev->mac_removed[priv->port] = 0; @@ -699,7 +714,7 @@ tx_err: mlx4_en_release_rss_steer(priv); mac_err: - mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); + mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); cq_err: while (rx_index--) mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); @@ -745,10 +760,6 @@ void mlx4_en_stop_port(struct net_device *dev) /* Flush 
multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); - /* Unregister Mac address for the port */ - mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); - mdev->mac_removed[priv->port] = 1; - /* Free TX Rings */ for (i = 0; i < priv->tx_ring_num; i++) { mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); @@ -762,6 +773,10 @@ void mlx4_en_stop_port(struct net_device *dev) /* Free RSS qps */ mlx4_en_release_rss_steer(priv); + /* Unregister Mac address for the port */ + mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); + mdev->mac_removed[priv->port] = 1; + /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); @@ -974,6 +989,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) return 0; } +static int mlx4_en_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + if (features & NETIF_F_LOOPBACK) + priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); + else + priv->ctrl_flags &= + cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); + + return 0; + +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -990,6 +1020,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, #endif + .ndo_set_features = mlx4_en_set_features, }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -1022,6 +1053,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->port = port; priv->port_up = false; priv->flags = prof->flags; + priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); priv->tx_ring_num = prof->tx_ring_num; priv->rx_ring_num = prof->rx_ring_num; priv->mac_index = -1; @@ -1088,6 +1121,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->features = dev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + dev->hw_features |= NETIF_F_LOOPBACK; mdev->pndev[port] = dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 03c84cd78cd..331791467a2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -41,13 +41,6 @@ #include "mlx4_en.h" -int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, - u64 mac, u64 clear, u8 mode) -{ - return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, - MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B); -} - int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) { struct mlx4_cmd_mailbox *mailbox; @@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) filter->entry[i] = cpu_to_be32(entry); } err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR, - MLX4_CMD_TIME_CLASS_B); - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} - - -int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, - u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) -{ - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_set_port_general_context *context; - int err; - u32 in_mod; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - context = mailbox->buf; - memset(context, 0, sizeof *context); - - context->flags = SET_PORT_GEN_ALL_VALID; - context->mtu = cpu_to_be16(mtu); - context->pptx = (pptx * (!pfctx)) << 7; - 
context->pfctx = pfctx; - context->pprx = (pprx * (!pfcrx)) << 7; - context->pfcrx = pfcrx; - - in_mod = MLX4_SET_PORT_GENERAL << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} - -int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, - u8 promisc) -{ - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_set_port_rqp_calc_context *context; - int err; - u32 in_mod; - u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? - MCAST_DIRECT : MCAST_DEFAULT; - - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && - dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) - return 0; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - context = mailbox->buf; - memset(context, 0, sizeof *context); - - context->base_qpn = cpu_to_be32(base_qpn); - context->n_mac = dev->caps.log_num_macs; - context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | - base_qpn); - context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | - base_qpn); - context->intra_no_vlan = 0; - context->no_vlan = MLX4_NO_VLAN_IDX; - context->intra_vlan_miss = 0; - context->vlan_miss = MLX4_VLAN_MISS_IDX; - - in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B); - + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; } @@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) return PTR_ERR(mailbox); memset(mailbox->buf, 0, sizeof(*qport_context)); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, - MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); if (err) goto out; qport_context = mailbox->buf; @@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) return PTR_ERR(mailbox); memset(mailbox->buf, 0, sizeof(*mlx4_en_stats)); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, - MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 19eb244f516..6934fd7e66e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -39,49 +39,6 @@ #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 -enum { - MLX4_CMD_SET_VLAN_FLTR = 0x47, - MLX4_CMD_SET_MCAST_FLTR = 0x48, - MLX4_CMD_DUMP_ETH_STATS = 0x49, -}; - -enum { - MCAST_DIRECT_ONLY = 0, - MCAST_DIRECT = 1, - MCAST_DEFAULT = 2 -}; - -struct mlx4_set_port_general_context { - u8 reserved[3]; - u8 flags; - u16 reserved2; - __be16 mtu; - u8 pptx; - u8 pfctx; - u16 reserved3; - u8 pprx; - u8 pfcrx; - u16 reserved4; -}; - -struct mlx4_set_port_rqp_calc_context { - __be32 base_qpn; - u8 rererved; - u8 n_mac; - u8 n_vlan; - u8 n_prio; - u8 reserved2[3]; - u8 mac_miss; - u8 intra_no_vlan; - u8 no_vlan; - u8 intra_vlan_miss; - u8 vlan_miss; - u8 reserved3[3]; - u8 no_vlan_prio; - __be32 promisc; - __be32 mcast; -}; - #define VLAN_FLTR_SIZE 128 struct mlx4_set_vlan_fltr_mbox { __be32 entry[VLAN_FLTR_SIZE]; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 0dfb4ec8a9d..bcbc54c1694 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, struct mlx4_en_dev *mdev = priv->mdev; memset(context, 0, sizeof *context); - context->flags = cpu_to_be32(7 << 16 | rss << 13); + context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); context->pd = cpu_to_be32(mdev->priv_pdn); context->mtu_msgmax = 0xff; if (!is_tx && !rss) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index c2df6c35860..e8d6ad2dce0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud unsigned int length; int polled = 0; int ip_summed; + struct ethhdr *ethh; + u64 s_mac; if (!priv->port_up) return 0; @@ -577,6 +579,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto next; } + /* Get pointer to first fragment since we haven't skb yet and + * cast it to ethhdr struct */ + ethh = (struct ethhdr *)(page_address(skb_frags[0].page) + + skb_frags[0].offset); + s_mac = mlx4_en_mac_to_u64(ethh->h_source); + + /* If source MAC is equal to our own MAC and not performing + * the selftest or flb disabled - drop the packet */ + if (s_mac == priv->mac && + (!(dev->features & NETIF_F_LOOPBACK) || + !priv->validate_loopback)) + goto next; + /* * Packet is OK - process it. */ @@ -837,9 +852,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rss_map *rss_map = &priv->rss_map; struct mlx4_qp_context context; - struct mlx4_en_rss_context *rss_context; + struct mlx4_rss_context *rss_context; void *ptr; - u8 rss_mask = 0x3f; + u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | + MLX4_RSS_TCP_IPV6); int i, qpn; int err = 0; int good_qps = 0; @@ -877,18 +893,21 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, priv->rx_ring[0].cqn, &context); - ptr = ((void *) &context) + 0x3c; + ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) + + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; rss_context = ptr; rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | (rss_map->base_qpn)); rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); + if (priv->mdev->profile.udp_rss) { + rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; + rss_context->base_qpn_udp = rss_context->default_qpn; + } rss_context->flags = rss_mask; - rss_context->hash_fn = 1; + rss_context->hash_fn = MLX4_RSS_HASH_TOP; for (i = 0; i < 10; i++) rss_context->rss_key[i] = rsskey[i]; - if (priv->mdev->profile.udp_rss) - rss_context->base_qpn_udp = rss_context->default_qpn; err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, &rss_map->indir_qp, &rss_map->indir_state); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 9fdbcecd499..bf2e5d3f177 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -43,7 +43,7 @@ static int mlx4_en_test_registers(struct mlx4_en_priv *priv) { return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv 
*priv) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index d901b426753..9ef9038d062 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } - static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; - struct mlx4_cqe *cqe = cq->buf; + struct mlx4_cqe *cqe; u16 index; - u16 new_index; + u16 new_index, ring_index; u32 txbbs_skipped = 0; - u32 cq_last_sav; - - /* index always points to the first TXBB of the last polled descriptor */ - index = ring->cons & ring->size_mask; - new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; - if (index == new_index) - return; + u32 cons_index = mcq->cons_index; + int size = cq->size; + u32 size_mask = ring->size_mask; + struct mlx4_cqe *buf = cq->buf; if (!priv->port_up) return; - /* - * We use a two-stage loop: - * - the first samples the HW-updated CQE - * - the second frees TXBBs until the last sample - * This lets us amortize CQE cache misses, while still polling the CQ - * until is quiescent. - */ - cq_last_sav = mcq->cons_index; - do { + index = cons_index & size_mask; + cqe = &buf[index]; + ring_index = ring->cons & size_mask; + + /* Process all completed CQEs */ + while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, + cons_index & size)) { + /* + * make sure we read the CQE after we read the + * ownership bit + */ + rmb(); + + /* Skip over last polled CQE */ + new_index = be16_to_cpu(cqe->wqe_index) & size_mask; + do { - /* Skip over last polled CQE */ - index = (index + ring->last_nr_txbb) & ring->size_mask; txbbs_skipped += ring->last_nr_txbb; - - /* Poll next CQE */ + ring_index = (ring_index + ring->last_nr_txbb) & size_mask; + /* free next descriptor */ ring->last_nr_txbb = mlx4_en_free_tx_desc( - priv, ring, index, - !!((ring->cons + txbbs_skipped) & - ring->size)); - ++mcq->cons_index; - - } while (index != new_index); + priv, ring, ring_index, + !!((ring->cons + txbbs_skipped) & + ring->size)); + } while (ring_index != new_index); + + ++cons_index; + index = cons_index & size_mask; + cqe = &buf[index]; + } - new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; - } while (index != new_index); - AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, - (u32) (mcq->cons_index - cq_last_sav)); /* * To prevent CQ overflow we first update CQ consumer and only then * the ring consumer. 
*/ + mcq->cons_index = cons_index; mlx4_cq_set_ci(mcq); wmb(); ring->cons += txbbs_skipped; @@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); } tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * + (!!vlan_tx_tag_present(skb)); tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; } @@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Prepare ctrl segement apart opcode+ownership, which depends on * whether LSO is used */ tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * + !!vlan_tx_tag_present(skb); tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; - tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | - MLX4_WQE_CTRL_SOLICITED); + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | MLX4_WQE_CTRL_TCP_UDP_CSUM); ring->tx_csum++; } - if (unlikely(priv->validate_loopback)) { - /* Copy dst mac address to wqe */ - skb_reset_mac_header(skb); - ethh = eth_hdr(skb); - if (ethh && ethh->h_dest) { - mac = mlx4_en_mac_to_u64(ethh->h_dest); - mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); - mac_l = (u32) (mac & 0xffffffff); - tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); - tx_desc->ctrl.imm = cpu_to_be32(mac_l); - } + /* Copy dst mac address to wqe */ + skb_reset_mac_header(skb); + ethh = eth_hdr(skb); + if (ethh && ethh->h_dest) { + mac = mlx4_en_mac_to_u64(ethh->h_dest); + mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); + mac_l = (u32) (mac & 0xffffffff); + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); + tx_desc->ctrl.imm = cpu_to_be32(mac_l); } /* Handle LSO (TSO) packets */ diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 24ee9677599..1e9b55eb721 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/export.h> @@ -52,30 +53,6 @@ enum { MLX4_EQ_ENTRY_SIZE = 0x20 }; -/* - * Must be packed because start is 64 bits but only aligned to 32 bits. 
- */ -struct mlx4_eq_context { - __be32 flags; - u16 reserved1[3]; - __be16 page_offset; - u8 log_eq_size; - u8 reserved2[4]; - u8 eq_period; - u8 reserved3; - u8 eq_max_count; - u8 reserved4[3]; - u8 intr; - u8 log_page_size; - u8 reserved5[2]; - u8 mtt_base_addr_h; - __be32 mtt_base_addr_l; - u32 reserved6[2]; - __be32 consumer_index; - __be32 producer_index; - u32 reserved7[4]; -}; - #define MLX4_EQ_STATUS_OK ( 0 << 28) #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) #define MLX4_EQ_OWNER_SW ( 0 << 24) @@ -100,46 +77,9 @@ struct mlx4_eq_context { (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ - (1ull << MLX4_EVENT_TYPE_CMD)) - -struct mlx4_eqe { - u8 reserved1; - u8 type; - u8 reserved2; - u8 subtype; - union { - u32 raw[6]; - struct { - __be32 cqn; - } __packed comp; - struct { - u16 reserved1; - __be16 token; - u32 reserved2; - u8 reserved3[3]; - u8 status; - __be64 out_param; - } __packed cmd; - struct { - __be32 qpn; - } __packed qp; - struct { - __be32 srqn; - } __packed srq; - struct { - __be32 cqn; - u32 reserved1; - u8 reserved2[3]; - u8 syndrome; - } __packed cq_err; - struct { - u32 reserved1[2]; - __be32 port; - } __packed port_change; - } event; - u8 reserved3[3]; - u8 owner; -} __packed; + (1ull << MLX4_EVENT_TYPE_CMD) | \ + (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ + (1ull << MLX4_EVENT_TYPE_FLR_EVENT)) static void eq_set_ci(struct mlx4_eq *eq, int req_not) { @@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq) return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; } +static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) +{ + struct mlx4_eqe *eqe = + &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; + return (!!(eqe->owner & 0x80) ^ + !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? + eqe : NULL; +} + +void mlx4_gen_slave_eqe(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, struct mlx4_mfunc_master_ctx, + slave_event_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; + struct mlx4_eqe *eqe; + u8 slave; + int i; + + for (eqe = next_slave_event_eqe(slave_eq); eqe; + eqe = next_slave_event_eqe(slave_eq)) { + slave = eqe->slave_id; + + /* All active slaves need to receive the event */ + if (slave == ALL_SLAVES) { + for (i = 0; i < dev->num_slaves; i++) { + if (i != dev->caps.function && + master->slave_state[i].active) + if (mlx4_GEN_EQE(dev, i, eqe)) + mlx4_warn(dev, "Failed to " + " generate event " + "for slave %d\n", i); + } + } else { + if (mlx4_GEN_EQE(dev, slave, eqe)) + mlx4_warn(dev, "Failed to generate event " + "for slave %d\n", slave); + } + ++slave_eq->cons; + } +} + + +static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; + struct mlx4_eqe *s_eqe = + &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; + + if ((!!(s_eqe->owner & 0x80)) ^ + (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { + mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. 
" + "No free EQE on slave events queue\n", slave); + return; + } + + memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); + s_eqe->slave_id = slave; + /* ensure all information is written before setting the ownersip bit */ + wmb(); + s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80; + ++slave_eq->prod; + + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.slave_event_work); +} + +static void mlx4_slave_event(struct mlx4_dev *dev, int slave, + struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_slave = + &priv->mfunc.master.slave_state[slave]; + + if (!s_slave->active) { + /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ + return; + } + + slave_event(dev, slave, eqe); +} + +void mlx4_master_handle_slave_flr(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, struct mlx4_mfunc_master_ctx, + slave_flr_event_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = + container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int i; + int err; + + mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); + + for (i = 0 ; i < dev->num_slaves; i++) { + + if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { + mlx4_dbg(dev, "mlx4_handle_slave_flr: " + "clean slave: %d\n", i); + + mlx4_delete_all_resources_for_slave(dev, i); + /*return the slave to running mode*/ + spin_lock(&priv->mfunc.master.slave_state_lock); + slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; + slave_state[i].is_slave_going_down = 0; + spin_unlock(&priv->mfunc.master.slave_state_lock); + /*notify the FW:*/ + err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to notify FW on " + "FLR done (slave:%d)\n", i); + } + } +} + static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { + struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; int cqn; int eqes_found = 0; int set_ci = 0; int port; + int slave = 0; + int ret; + u32 flr_slave; + u8 update_slave_state; + int i; while ((eqe = next_eqe_sw(eq))) { /* @@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) case MLX4_EVENT_TYPE_PATH_MIG_FAILED: case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: - mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, - eqe->type); + mlx4_dbg(dev, "event %d arrived\n", eqe->type); + if (mlx4_is_master(dev)) { + /* forward only to slave owning the QP */ + ret = mlx4_get_slave_from_resource_id(dev, + RES_QP, + be32_to_cpu(eqe->event.qp.qpn) + & 0xffffff, &slave); + if (ret && ret != -ENOENT) { + mlx4_dbg(dev, "QP event %02x(%02x) on " + "EQ %d at index %u: could " + "not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + + if (!ret && slave != dev->caps.function) { + mlx4_slave_event(dev, slave, eqe); + break; + } + + } + mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & + 0xffffff, eqe->type); break; case MLX4_EVENT_TYPE_SRQ_LIMIT: + mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", + __func__); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: - mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, - eqe->type); + if (mlx4_is_master(dev)) { + /* forward only to slave owning the SRQ */ + ret = mlx4_get_slave_from_resource_id(dev, + RES_SRQ, + be32_to_cpu(eqe->event.srq.srqn) + & 
0xffffff, + &slave); + if (ret && ret != -ENOENT) { + mlx4_warn(dev, "SRQ event %02x(%02x) " + "on EQ %d at index %u: could" + " not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," + " event: %02x(%02x)\n", __func__, + slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); + + if (!ret && slave != dev->caps.function) { + mlx4_warn(dev, "%s: sending event " + "%02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); + mlx4_slave_event(dev, slave, eqe); + break; + } + } + mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & + 0xffffff, eqe->type); break; case MLX4_EVENT_TYPE_CMD: @@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) case MLX4_EVENT_TYPE_PORT_CHANGE: port = be32_to_cpu(eqe->event.port_change.port) >> 28; if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, + mlx4_dispatch_event(dev, + MLX4_DEV_EVENT_PORT_DOWN, port); mlx4_priv(dev)->sense.do_sense_port[port] = 1; + if (mlx4_is_master(dev)) + /*change the state of all slave's port + * to down:*/ + for (i = 0; i < dev->num_slaves; i++) { + mlx4_dbg(dev, "%s: Sending " + "MLX4_PORT_CHANGE_SUBTYPE_DOWN" + " to slave: %d, port:%d\n", + __func__, i, port); + if (i == dev->caps.function) + continue; + mlx4_slave_event(dev, i, eqe); + } } else { - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, + mlx4_dispatch_event(dev, + MLX4_DEV_EVENT_PORT_UP, port); mlx4_priv(dev)->sense.do_sense_port[port] = 0; + + if (mlx4_is_master(dev)) { + for (i = 0; i < dev->num_slaves; i++) { + if (i == dev->caps.function) + continue; + mlx4_slave_event(dev, i, eqe); + } + } } break; @@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eqe->event.cq_err.syndrome == 1 ? 
"overrun" : "access violation", be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); - mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), + if (mlx4_is_master(dev)) { + ret = mlx4_get_slave_from_resource_id(dev, + RES_CQ, + be32_to_cpu(eqe->event.cq_err.cqn) + & 0xffffff, &slave); + if (ret && ret != -ENOENT) { + mlx4_dbg(dev, "CQ event %02x(%02x) on " + "EQ %d at index %u: could " + "not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + + if (!ret && slave != dev->caps.function) { + mlx4_slave_event(dev, slave, eqe); + break; + } + } + mlx4_cq_event(dev, + be32_to_cpu(eqe->event.cq_err.cqn) + & 0xffffff, eqe->type); break; @@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; + case MLX4_EVENT_TYPE_COMM_CHANNEL: + if (!mlx4_is_master(dev)) { + mlx4_warn(dev, "Received comm channel event " + "for non master device\n"); + break; + } + memcpy(&priv->mfunc.master.comm_arm_bit_vector, + eqe->event.comm_channel_arm.bit_vec, + sizeof eqe->event.comm_channel_arm.bit_vec); + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.comm_work); + break; + + case MLX4_EVENT_TYPE_FLR_EVENT: + flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); + if (!mlx4_is_master(dev)) { + mlx4_warn(dev, "Non-master function received" + "FLR event\n"); + break; + } + + mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); + + if (flr_slave > dev->num_slaves) { + mlx4_warn(dev, + "Got FLR for unknown function: %d\n", + flr_slave); + update_slave_state = 0; + } else + update_slave_state = 1; + + spin_lock(&priv->mfunc.master.slave_state_lock); + if (update_slave_state) { + priv->mfunc.master.slave_state[flr_slave].active = false; + priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; + priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; + } + spin_unlock(&priv->mfunc.master.slave_state_lock); + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.slave_flr_event_work); + break; case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: - mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n", - eqe->type, eqe->subtype, eq->eqn, eq->cons_index); + mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " + "index %u. owner=%x, nent=0x%x, slave=%x, " + "ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + eqe->slave_id, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? 
"HW" : "SW"); break; - } + }; ++eq->cons_index; eqes_found = 1; @@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) return IRQ_HANDLED; } +int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq_info *event_eq = + &priv->mfunc.master.slave_state[slave].event_eq; + u32 in_modifier = vhcr->in_modifier; + u32 eqn = in_modifier & 0x1FF; + u64 in_param = vhcr->in_param; + int err = 0; + + if (slave == dev->caps.function) + err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + if (!err) { + if (in_modifier >> 31) { + /* unmap */ + event_eq->event_type &= ~in_param; + } else { + event_eq->eqn = eqn; + event_eq->event_type = in_param; + } + } + return err; +} + static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, int eq_num) { return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, - 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B); + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); } static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { - return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ, - MLX4_CMD_TIME_CLASS_A); + return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0, + MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { - return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ, - MLX4_CMD_TIME_CLASS_A); + return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num, + 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); } static int mlx4_num_eq_uar(struct mlx4_dev *dev) @@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) for (i = 0; i < mlx4_num_eq_uar(dev); ++i) priv->eq_table.uar_map[i] = NULL; - err = mlx4_map_clr_int(dev); - if (err) - goto err_out_bitmap; + if (!mlx4_is_slave(dev)) { + err = mlx4_map_clr_int(dev); + if (err) + goto err_out_bitmap; - priv->eq_table.clr_mask = - swab32(1 << (priv->eq_table.inta_pin & 31)); - priv->eq_table.clr_int = priv->clr_base + - (priv->eq_table.inta_pin < 32 ? 4 : 0); + priv->eq_table.clr_mask = + swab32(1 << (priv->eq_table.inta_pin & 31)); + priv->eq_table.clr_int = priv->clr_base + + (priv->eq_table.inta_pin < 32 ? 
4 : 0); + } priv->eq_table.irq_names = kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + @@ -700,7 +950,8 @@ err_out_unmap: mlx4_free_eq(dev, &priv->eq_table.eq[i]); --i; } - mlx4_unmap_clr_int(dev); + if (!mlx4_is_slave(dev)) + mlx4_unmap_clr_int(dev); mlx4_free_irqs(dev); err_out_bitmap: @@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); - mlx4_unmap_clr_int(dev); + if (!mlx4_is_slave(dev)) + mlx4_unmap_clr_int(dev); for (i = 0; i < mlx4_num_eq_uar(dev); ++i) if (priv->eq_table.uar_map[i]) @@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) err = mlx4_NOP(dev); /* When not in MSI_X, there is only one irq to check */ - if (!(dev->flags & MLX4_FLAG_MSI_X)) + if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) return err; /* A loop over all completion vectors, for each vector we will check diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 435ca6e4973..a424a19280c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -32,6 +32,7 @@ * SOFTWARE. */ +#include <linux/etherdevice.h> #include <linux/mlx4/cmd.h> #include <linux/module.h> #include <linux/cache.h> @@ -48,7 +49,7 @@ enum { extern void __buggy_use_of_MLX4_GET(void); extern void __buggy_use_of_MLX4_PUT(void); -static int enable_qos; +static bool enable_qos; module_param(enable_qos, bool, 0444); MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); @@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); return err; } +int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u8 field; + u32 size; + int err = 0; + +#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 +#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 +#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3 +#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 +#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 +#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c +#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 + +#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 +#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc + + if (vhcr->op_modifier == 1) { + field = vhcr->in_modifier; + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + + field = 0; /* ensure fvl bit is not set */ + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); + } else if (vhcr->op_modifier == 0) { + field = 1 << 7; /* enable only ethernet interface */ + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); + + field = slave; + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET); + + field = dev->caps.num_ports; + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); + + size = 0; /* no PF behavious is set for now */ + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + + size = 
dev->caps.num_qps; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + + size = dev->caps.num_srqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + + size = dev->caps.num_cqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + + size = dev->caps.num_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + + size = dev->caps.reserved_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + + size = dev->caps.num_mpts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + + size = dev->caps.num_mtts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + + size = dev->caps.num_mgms + dev->caps.num_amgms; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + + } else + err = -EINVAL; + + return err; +} + +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 size; + int i; + int err = 0; + + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + goto out; + + outbox = mailbox->buf; + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); + if (!(field & (1 << 7))) { + mlx4_err(dev, "The host doesn't support eth interface\n"); + err = -EPROTONOSUPPORT; + goto out; + } + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET); + func_cap->function = field; + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); + func_cap->num_ports = field; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + func_cap->pf_context_behaviour = size; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + func_cap->qp_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + func_cap->srq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + func_cap->cq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + func_cap->max_eq = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + func_cap->reserved_eq = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + func_cap->mcg_quota = size & 0xFFFFFF; + + for (i = 1; i <= func_cap->num_ports; ++i) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1, + MLX4_CMD_QUERY_FUNC_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); + if (field & (1 << 7)) { + mlx4_err(dev, "VLAN is enforced on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + + if (field & (1 << 6)) { + mlx4_err(dev, "Force mac is enabled on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + func_cap->physical_port[i] = field; + } + + /* All other resources are allocated by the master, but we still report + * 'num' and 'reserved' capabilities as follows: + * - num remains the maximum resource index + * - 'num - reserved' is the total available objects of a resource, but + * resource indices may be less than 'reserved' + * TODO: set per-resource quotas */ + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + int 
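On the slave side of QUERY_FUNC_CAP, each quota arrives in the low 24 bits of a 32-bit dword, and two per-port property bits are rejected as unsupported: bit 7 (VLAN enforced) and bit 6 (forced MAC). A minimal stand-alone illustration of that parsing, with made-up mailbox values:

/* Sketch only: mimics the 24-bit quota extraction and the per-port
 * flag checks a slave applies to QUERY_FUNC_CAP output. */
#include <stdint.h>
#include <stdio.h>

#define ETH_PROP_VLAN_ENFORCED (1 << 7)
#define ETH_PROP_FORCE_MAC     (1 << 6)

int main(void)
{
    uint32_t raw_qp_quota = 0xAB012345;   /* invented dword from the mailbox */
    uint8_t  eth_props    = ETH_PROP_FORCE_MAC;

    uint32_t qp_quota = raw_qp_quota & 0xFFFFFF;   /* keep the low 24 bits */

    printf("qp quota: %u\n", qp_quota);
    if (eth_props & ETH_PROP_VLAN_ENFORCED)
        printf("VLAN is enforced on this port -> not supported\n");
    if (eth_props & ETH_PROP_FORCE_MAC)
        printf("forced MAC on this port -> not supported\n");
    return 0;
}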
mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { struct mlx4_cmd_mailbox *mailbox; @@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); if (err) goto out; @@ -396,12 +570,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) for (i = 1; i <= dev_cap->num_ports; ++i) { err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, + !mlx4_is_slave(dev)); if (err) goto out; MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); dev_cap->supported_port_types[i] = field & 3; + dev_cap->suggested_type[i] = (field >> 3) & 1; + dev_cap->default_sense[i] = (field >> 4) & 1; MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); dev_cap->ib_mtu[i] = field & 0xf; MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); @@ -470,6 +647,61 @@ out: return err; } +int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u64 def_mac; + u8 port_type; + int err; + +#define MLX4_PORT_SUPPORT_IB (1 << 0) +#define MLX4_PORT_SUGGEST_TYPE (1 << 3) +#define MLX4_PORT_DEFAULT_SENSE (1 << 4) +#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \ + ~MLX4_PORT_SUGGEST_TYPE & \ + ~MLX4_PORT_DEFAULT_SENSE) + + err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + if (!err && dev->caps.function != slave) { + /* set slave default_mac address */ + MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); + def_mac += slave << 8; + MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); + + /* get port type - currently only eth is enabled */ + MLX4_GET(port_type, outbox->buf, + QUERY_PORT_SUPPORTED_TYPE_OFFSET); + + /* Allow only Eth port, no link sensing allowed */ + port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; + + /* check eth is enabled for this port */ + if (!(port_type & 2)) + mlx4_dbg(dev, "QUERY PORT: eth not supported by host"); + + MLX4_PUT(outbox->buf, port_type, + QUERY_PORT_SUPPORTED_TYPE_OFFSET); + } + + return err; +} + +static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port) +{ + struct mlx4_cmd_mailbox *outbox = ptr; + + return mlx4_cmd_box(dev, 0, outbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} +EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT); + int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) { struct mlx4_cmd_mailbox *mailbox; @@ -519,7 +751,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) if (++nent == MLX4_MAILBOX_SIZE / 16) { err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); if (err) goto out; nent = 0; @@ -528,7 +761,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) } if (nent) - err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B); + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; @@ -557,13 +791,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) int mlx4_UNMAP_FA(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B); + return mlx4_cmd(dev, 0, 
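In the QUERY_PORT wrapper above, the master rewrites the reply before it reaches a VF: the port's default MAC is offset by (slave << 8) so every function sees a distinct address, and the supported-type byte is masked so the IB, suggest-type and default-sense bits disappear, leaving only the Ethernet capability visible. A toy model of the MAC derivation (addresses are invented):

/* Sketch only: derive per-slave default MACs the way the wrapper does,
 * by adding (slave << 8) to the physical port's MAC. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t port_mac = 0x0002c9123400ULL;   /* example 48-bit MAC */
    int slave;

    for (slave = 1; slave <= 3; slave++) {
        uint64_t def_mac = port_mac + ((uint64_t)slave << 8);
        printf("slave %d default MAC: %012llx\n",
               slave, (unsigned long long)def_mac);
    }
    return 0;
}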
0, 0, MLX4_CMD_UNMAP_FA, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_RUN_FW(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } int mlx4_QUERY_FW(struct mlx4_dev *dev) @@ -579,6 +815,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) #define QUERY_FW_OUT_SIZE 0x100 #define QUERY_FW_VER_OFFSET 0x00 +#define QUERY_FW_PPF_ID 0x09 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a #define QUERY_FW_MAX_CMD_OFFSET 0x0f #define QUERY_FW_ERR_START_OFFSET 0x30 @@ -589,13 +826,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 +#define QUERY_FW_COMM_BASE_OFFSET 0x40 +#define QUERY_FW_COMM_BAR_OFFSET 0x48 + mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; @@ -608,6 +848,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); + dev->caps.function = lg; + MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { @@ -649,6 +892,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; + MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); + MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); + fw->comm_bar = (fw->comm_bar >> 6) * 2; + mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", + fw->comm_bar, fw->comm_base); mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); /* @@ -711,7 +959,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; @@ -743,6 +991,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) +#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) @@ -831,10 +1080,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) /* UAR attributes */ - MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); - err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000); + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, + MLX4_CMD_NATIVE); if (err) mlx4_err(dev, "INIT_HCA returns %d\n", err); @@ -843,6 +1093,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) return err; } +int mlx4_QUERY_HCA(struct mlx4_dev *dev, + struct mlx4_init_hca_param *param) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *outbox; + int err; + +#define 
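QUERY_FW now also reports the communication-channel base address and BAR, plus the PPF id that becomes dev->caps.function. As with the clear-interrupt register, the BAR field packs the BAR index in its upper bits, so the driver recovers it as (raw >> 6) * 2. A tiny decode example with an assumed raw value:

/* Sketch only: decode a BAR field the way the comm-channel code does,
 * i.e. (raw >> 6) * 2.  The raw value is invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t raw_comm_bar = 1 << 6;               /* pretend firmware reported "1" */
    uint32_t comm_bar = (raw_comm_bar >> 6) * 2;  /* BAR index in bytes-of-two units */

    printf("communication channel lives in BAR %u\n", comm_bar);
    return 0;
}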
QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_QUERY_HCA, + MLX4_CMD_TIME_CLASS_B, + !mlx4_is_slave(dev)); + if (err) + goto out; + + MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); + MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); + MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); + MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); + MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); + MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); + MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + + /* multicast attributes */ + + MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_hash_sz, outbox, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + + /* TPT attributes */ + + MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); + MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port = vhcr->in_modifier; + int err; + + if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) + return 0; + + if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB) + return -ENODEV; + + /* Enable port only if it was previously disabled */ + if (!priv->mfunc.master.init_port_ref[port]) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + priv->mfunc.master.slave_state[slave].init_port_mask |= + (1 << port); + } + ++priv->mfunc.master.init_port_ref[port]; + return 0; +} + int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) { struct mlx4_cmd_mailbox *mailbox; @@ -886,33 +1231,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); } else err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); return err; } EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); +int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox 
*outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port = vhcr->in_modifier; + int err; + + if (!(priv->mfunc.master.slave_state[slave].init_port_mask & + (1 << port))) + return 0; + + if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB) + return -ENODEV; + if (priv->mfunc.master.init_port_ref[port] == 1) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, + MLX4_CMD_NATIVE); + if (err) + return err; + } + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + --priv->mfunc.master.init_port_ref[port]; + return 0; +} + int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) { - return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000); + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, + MLX4_CMD_WRAPPED); } EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) { - return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000); + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, + MLX4_CMD_NATIVE); } int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, MLX4_CMD_SET_ICM_SIZE, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (ret) return ret; @@ -929,7 +1303,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) int mlx4_NOP(struct mlx4_dev *dev) { /* Input modifier of 0x1f means "finish as soon as possible." */ - return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); } #define MLX4_WOL_SETUP_MODE (5 << 28) @@ -938,7 +1312,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, - MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_wol_read); @@ -947,6 +1322,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_wol_write); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index bf5ec228652..119e0cc9fab 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -111,11 +111,30 @@ struct mlx4_dev_cap { u64 max_icm_sz; int max_gso_sz; u8 supported_port_types[MLX4_MAX_PORTS + 1]; + u8 suggested_type[MLX4_MAX_PORTS + 1]; + u8 default_sense[MLX4_MAX_PORTS + 1]; u8 log_max_macs[MLX4_MAX_PORTS + 1]; u8 log_max_vlans[MLX4_MAX_PORTS + 1]; u32 max_counters; }; +struct mlx4_func_cap { + u8 function; + u8 num_ports; + u8 flags; + u32 pf_context_behaviour; + int qp_quota; + int cq_quota; + int srq_quota; + int mpt_quota; + int mtt_quota; + int max_eq; + int reserved_eq; + int mcg_quota; + u8 physical_port[MLX4_MAX_PORTS + 1]; + u8 port_flags[MLX4_MAX_PORTS + 1]; +}; + struct mlx4_adapter { char board_id[MLX4_BOARD_ID_LEN]; u8 inta_pin; @@ -133,6 +152,7 @@ struct mlx4_init_hca_param { u64 dmpt_base; u64 cmpt_base; u64 mtt_base; + u64 global_caps; u16 log_mc_entry_sz; u16 log_mc_hash_sz; u8 log_num_qps; @@ -143,6 +163,7 @@ struct mlx4_init_hca_param { u8 log_mc_table_sz; u8 log_mpt_sz; u8 log_uar_sz; + u8 uar_page_sz; /* log pg sz in 4k chunks */ }; struct mlx4_init_ib_param { @@ -167,12 +188,19 @@ struct 
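Taken together, the INIT_PORT and CLOSE_PORT wrappers above virtualize port bring-up: each slave only toggles a bit in its own init_port_mask, a shared per-port reference count is kept, and the real firmware command goes out only on the first open and the last close. A compact user-space model of that reference counting (structure names here are invented):

/* Sketch only: per-slave port open/close folded onto one physical port.
 * The firmware command is modelled by a printf. */
#include <stdio.h>

#define MAX_SLAVES 4

static unsigned int init_port_mask[MAX_SLAVES];  /* one bit per port, per slave */
static int init_port_ref[8];                     /* shared per-port refcount */

static void slave_init_port(int slave, int port)
{
    if (init_port_mask[slave] & (1u << port))
        return;                          /* this slave already opened it */
    if (init_port_ref[port]++ == 0)
        printf("INIT_PORT %d sent to firmware\n", port);
    init_port_mask[slave] |= 1u << port;
}

static void slave_close_port(int slave, int port)
{
    if (!(init_port_mask[slave] & (1u << port)))
        return;                          /* nothing to close for this slave */
    if (--init_port_ref[port] == 0)
        printf("CLOSE_PORT %d sent to firmware\n", port);
    init_port_mask[slave] &= ~(1u << port);
}

int main(void)
{
    slave_init_port(0, 1);    /* first opener triggers the real command */
    slave_init_port(1, 1);    /* second opener only bumps the refcount */
    slave_close_port(0, 1);
    slave_close_port(1, 1);   /* last closer triggers the real command */
    return 0;
}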
mlx4_set_ib_param { }; int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap); +int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_FA(struct mlx4_dev *dev); int mlx4_RUN_FW(struct mlx4_dev *dev); int mlx4_QUERY_FW(struct mlx4_dev *dev); int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); +int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 02393fdf44c..a9ade1c3cad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt) static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) { return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) @@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index ca6feb55bd9..b4e9f6f5cc0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev) mlx4_add_device(intf, priv); mutex_unlock(&intf_mutex); - mlx4_start_catas_poll(dev); + if (!mlx4_is_slave(dev)) + mlx4_start_catas_poll(dev); return 0; } @@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; - mlx4_stop_catas_poll(dev); + if (!mlx4_is_slave(dev)) + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 94bbc85a532..6bb62c580e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -40,6 +40,7 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/io-mapping.h> +#include <linux/delay.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #endif /* CONFIG_PCI_MSI */ +static int num_vfs; +module_param(num_vfs, int, 0444); +MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); + +static int probe_vf; +module_param(probe_vf, int, 0644); +MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); + +int 
mlx4_log_num_mgm_entry_size = 10; +module_param_named(log_num_mgm_entry_size, + mlx4_log_num_mgm_entry_size, int, 0444); +MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" + " of qp per mcg, for example:" + " 10 gives 248.range: 9<=" + " log_num_mgm_entry_size <= 12"); + +#define MLX4_VF (1 << 0) + +#define HCA_GLOBAL_CAP_MASK 0 +#define PF_CONTEXT_BEHAVIOUR_MASK 0 + static char mlx4_version[] __devinitdata = DRV_NAME ": Mellanox ConnectX core driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static struct mlx4_profile default_profile = { - .num_qp = 1 << 17, + .num_qp = 1 << 18, .num_srq = 1 << 16, .rdmarc_per_qp = 1 << 4, .num_cq = 1 << 16, .num_mcg = 1 << 13, - .num_mpt = 1 << 17, + .num_mpt = 1 << 19, .num_mtt = 1 << 20, }; -static int log_num_mac = 2; +static int log_num_mac = 7; module_param_named(log_num_mac, log_num_mac, int, 0444); MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); @@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); /* Log2 max number of VLANs per ETH port (0-7) */ #define MLX4_LOG_NUM_VLANS 7 -static int use_prio; +static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " "(0/1, default 0)"); -static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); +static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; +static int arr_argc = 2; +module_param_array(port_type_array, int, &arr_argc, 0444); +MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " + "1 for IB, 2 for Ethernet"); + +struct mlx4_port_config { + struct list_head list; + enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; + struct pci_dev *pdev; +}; + +static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev) +{ + return dev->caps.reserved_eqs + + MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1); +} + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) { int i; - dev->caps.port_mask = 0; for (i = 1; i <= dev->caps.num_ports; ++i) - if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) - dev->caps.port_mask |= 1 << (i - 1); + dev->caps.port_mask[i] = dev->caps.port_type[i]; } static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) @@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; dev->caps.def_mac[i] = dev_cap->def_mac[i]; dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; + dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; + dev->caps.default_sense[i] = dev_cap->default_sense[i]; dev->caps.trans_type[i] = dev_cap->trans_type[i]; dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; dev->caps.wavelength[i] = dev_cap->wavelength[i]; dev->caps.trans_code[i] = dev_cap->trans_code[i]; } + dev->caps.uar_page_size = PAGE_SIZE; dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; dev->caps.bf_reg_size = dev_cap->bf_reg_size; @@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.reserved_srqs = dev_cap->reserved_srqs; 
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; - dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM; + dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); /* * Subtract 1 from the limit because we need to allocate a * spare CQE so the HCA HW can tell the difference between an @@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; - dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; - dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, - dev->caps.mtts_per_seg); + dev->caps.reserved_mtts = dev_cap->reserved_mtts; dev->caps.reserved_mrws = dev_cap->reserved_mrws; - dev->caps.reserved_uars = dev_cap->reserved_uars; + + /* The first 128 UARs are used for EQ doorbells */ + dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->reserved_xrcds : 0; dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->max_xrcds : 0; - dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; + dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; + dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; @@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.stat_rate_support = dev_cap->stat_rate_support; dev->caps.max_gso_sz = dev_cap->max_gso_sz; + /* Sense port always allowed on supported devices for ConnectX1 and 2 */ + if (dev->pdev->device != 0x1003) + dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; + dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; dev->caps.log_num_prios = use_prio ? 3 : 0; for (i = 1; i <= dev->caps.num_ports; ++i) { - if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) - dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; - else - dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; - dev->caps.possible_type[i] = dev->caps.port_type[i]; + dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; + if (dev->caps.supported_type[i]) { + /* if only ETH is supported - assign ETH */ + if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; + /* if only IB is supported, + * assign IB only if SRIOV is off*/ + else if (dev->caps.supported_type[i] == + MLX4_PORT_TYPE_IB) { + if (dev->flags & MLX4_FLAG_SRIOV) + dev->caps.port_type[i] = + MLX4_PORT_TYPE_NONE; + else + dev->caps.port_type[i] = + MLX4_PORT_TYPE_IB; + /* if IB and ETH are supported, + * first of all check if SRIOV is on */ + } else if (dev->flags & MLX4_FLAG_SRIOV) + dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; + else { + /* In non-SRIOV mode, we set the port type + * according to user selection of port type, + * if usere selected none, take the FW hint */ + if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) + dev->caps.port_type[i] = dev->caps.suggested_type[i] ? + MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; + else + dev->caps.port_type[i] = port_type_array[i-1]; + } + } + /* + * Link sensing is allowed on the port if 3 conditions are true: + * 1. Both protocols are supported on the port. + * 2. Different types are supported on the port + * 3. 
FW declared that it supports link sensing + */ mlx4_priv(dev)->sense.sense_allowed[i] = - dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO; + ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && + (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && + (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); + + /* + * If "default_sense" bit is set, we move the port to "AUTO" mode + * and perform sense_port FW command to try and set the correct + * port type from beginning + */ + if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { + enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; + dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; + mlx4_SENSE_PORT(dev, i, &sensed_port); + if (sensed_port != MLX4_PORT_TYPE_NONE) + dev->caps.port_type[i] = sensed_port; + } else { + dev->caps.possible_type[i] = dev->caps.port_type[i]; + } if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { dev->caps.log_num_macs = dev_cap->log_max_macs[i]; @@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } } - mlx4_set_port_mask(dev); - dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; @@ -282,6 +374,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) return 0; } +/*The function checks if there are live vf, return the num of them*/ +static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state; + int i; + int ret = 0; + + for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { + s_state = &priv->mfunc.master.slave_state[i]; + if (s_state->active && s_state->last_cmd != + MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "%s: slave: %d is still active\n", + __func__, i); + ret++; + } + } + return ret; +} + +static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_slave; + + if (!mlx4_is_master(dev)) + return 0; + + s_slave = &priv->mfunc.master.slave_state[slave]; + return !!s_slave->active; +} +EXPORT_SYMBOL(mlx4_is_slave_active); + +static int mlx4_slave_cap(struct mlx4_dev *dev) +{ + int err; + u32 page_size; + struct mlx4_dev_cap dev_cap; + struct mlx4_func_cap func_cap; + struct mlx4_init_hca_param hca_param; + int i; + + memset(&hca_param, 0, sizeof(hca_param)); + err = mlx4_QUERY_HCA(dev, &hca_param); + if (err) { + mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); + return err; + } + + /*fail if the hca has an unknown capability */ + if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != + HCA_GLOBAL_CAP_MASK) { + mlx4_err(dev, "Unknown hca global capabilities\n"); + return -ENOSYS; + } + + mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; + + memset(&dev_cap, 0, sizeof(dev_cap)); + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + + page_size = ~dev->caps.page_size_cap + 1; + mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); + if (page_size > PAGE_SIZE) { + mlx4_err(dev, "HCA minimum page size of %d bigger than " + "kernel PAGE_SIZE of %ld, aborting.\n", + page_size, PAGE_SIZE); + return -ENODEV; + } + + /* slave gets uar page size from QUERY_HCA fw command */ + dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); + + /* TODO: relax this assumption */ + if (dev->caps.uar_page_size != PAGE_SIZE) { + mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", + dev->caps.uar_page_size, 
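The port-type selection above follows a fixed order: an Ethernet-only port becomes Ethernet; an IB-only port becomes IB unless SR-IOV is enabled (then it stays unset); a dual-protocol port becomes Ethernet under SR-IOV, otherwise the port_type_array module parameter wins, and if that is left at its default the firmware's suggested type is used. A rough sketch of that decision tree (enum values are illustrative, not the driver's):

/* Sketch only: the port-type decision order described above. */
#include <stdio.h>
#include <stdbool.h>

enum port_type { TYPE_NONE, TYPE_IB, TYPE_ETH, TYPE_BOTH };

static enum port_type pick_port_type(enum port_type supported, bool sriov,
                                     enum port_type user_choice,
                                     bool fw_suggests_eth)
{
    if (supported == TYPE_ETH)           /* only Ethernet is possible */
        return TYPE_ETH;
    if (supported == TYPE_IB)            /* IB only allowed without SR-IOV */
        return sriov ? TYPE_NONE : TYPE_IB;
    /* both protocols supported on the port */
    if (sriov)
        return TYPE_ETH;
    if (user_choice != TYPE_NONE)        /* module parameter wins */
        return user_choice;
    return fw_suggests_eth ? TYPE_ETH : TYPE_IB;   /* fall back to the FW hint */
}

int main(void)
{
    printf("chosen type: %d\n",
           pick_port_type(TYPE_BOTH, false, TYPE_NONE, true));
    return 0;
}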
PAGE_SIZE); + return -ENODEV; + } + + memset(&func_cap, 0, sizeof(func_cap)); + err = mlx4_QUERY_FUNC_CAP(dev, &func_cap); + if (err) { + mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n"); + return err; + } + + if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != + PF_CONTEXT_BEHAVIOUR_MASK) { + mlx4_err(dev, "Unknown pf context behaviour\n"); + return -ENOSYS; + } + + dev->caps.function = func_cap.function; + dev->caps.num_ports = func_cap.num_ports; + dev->caps.num_qps = func_cap.qp_quota; + dev->caps.num_srqs = func_cap.srq_quota; + dev->caps.num_cqs = func_cap.cq_quota; + dev->caps.num_eqs = func_cap.max_eq; + dev->caps.reserved_eqs = func_cap.reserved_eq; + dev->caps.num_mpts = func_cap.mpt_quota; + dev->caps.num_mtts = func_cap.mtt_quota; + dev->caps.num_pds = MLX4_NUM_PDS; + dev->caps.num_mgms = 0; + dev->caps.num_amgms = 0; + + for (i = 1; i <= dev->caps.num_ports; ++i) + dev->caps.port_mask[i] = dev->caps.port_type[i]; + + if (dev->caps.num_ports > MLX4_MAX_PORTS) { + mlx4_err(dev, "HCA has %d ports, but we only support %d, " + "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); + return -ENODEV; + } + + if (dev->caps.uar_page_size * (dev->caps.num_uars - + dev->caps.reserved_uars) > + pci_resource_len(dev->pdev, 2)) { + mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " + "PCI resource 2 size of 0x%llx, aborting.\n", + dev->caps.uar_page_size * dev->caps.num_uars, + (unsigned long long) pci_resource_len(dev->pdev, 2)); + return -ENODEV; + } + +#if 0 + mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux); + mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n", + dev->caps.num_uars, dev->caps.reserved_uars, + dev->caps.uar_page_size * dev->caps.num_uars, + pci_resource_len(dev->pdev, 2)); + mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs, + dev->caps.reserved_eqs); + mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n", + dev->caps.num_pds, dev->caps.reserved_pds, + dev->caps.slave_pd_shift, dev->caps.pd_base); +#endif + return 0; +} /* * Change the port configuration of the device. @@ -377,7 +612,8 @@ static ssize_t set_port_type(struct device *dev, types[i] = mdev->caps.port_type[i+1]; } - if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { + if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && + !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { for (i = 1; i <= mdev->caps.num_ports; i++) { if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { mdev->caps.possible_type[i] = mdev->caps.port_type[i]; @@ -451,6 +687,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, { struct mlx4_priv *priv = mlx4_priv(dev); int err; + int num_eqs; err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, cmpt_base + @@ -480,12 +717,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, if (err) goto err_srq; + num_eqs = (mlx4_is_master(dev)) ? 
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : + dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), - cmpt_entry_sz, - dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); + cmpt_entry_sz, num_eqs, num_eqs, 0, 0); if (err) goto err_cq; @@ -509,6 +748,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, { struct mlx4_priv *priv = mlx4_priv(dev); u64 aux_pages; + int num_eqs; int err; err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); @@ -540,10 +780,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, goto err_unmap_aux; } + + num_eqs = (mlx4_is_master(dev)) ? + roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : + dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, - dev->caps.num_eqs, dev->caps.num_eqs, - 0, 0); + num_eqs, num_eqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); goto err_unmap_cmpt; @@ -563,7 +806,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, init_hca->mtt_base, dev->caps.mtt_entry_sz, - dev->caps.num_mtt_segs, + dev->caps.num_mtts, dev->caps.reserved_mtts, 1, 0); if (err) { mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); @@ -650,7 +893,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, * and it's a lot easier than trying to track ref counts. */ err = mlx4_init_icm_table(dev, &priv->mcg_table.table, - init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, + init_hca->mc_base, + mlx4_get_mgm_entry_size(dev), dev->caps.num_mgms + dev->caps.num_amgms, dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); @@ -726,6 +970,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev) mlx4_free_icm(dev, priv->fw.aux_icm, 0); } +static void mlx4_slave_exit(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + down(&priv->cmd.slave_sem); + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) + mlx4_warn(dev, "Failed to close slave function.\n"); + up(&priv->cmd.slave_sem); +} + static int map_bf_area(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -733,8 +987,10 @@ static int map_bf_area(struct mlx4_dev *dev) resource_size_t bf_len; int err = 0; - bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); - bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); + bf_start = pci_resource_start(dev->pdev, 2) + + (dev->caps.num_uars << PAGE_SHIFT); + bf_len = pci_resource_len(dev->pdev, 2) - + (dev->caps.num_uars << PAGE_SHIFT); priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); if (!priv->bf_mapping) err = -ENOMEM; @@ -751,10 +1007,81 @@ static void unmap_bf_area(struct mlx4_dev *dev) static void mlx4_close_hca(struct mlx4_dev *dev) { unmap_bf_area(dev); - mlx4_CLOSE_HCA(dev, 0); - mlx4_free_icms(dev); - mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else { + mlx4_CLOSE_HCA(dev, 0); + mlx4_free_icms(dev); + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); + } +} + +static int mlx4_init_slave(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 dma = (u64) priv->mfunc.vhcr_dma; + int num_of_reset_retries = NUM_OF_RESET_RETRIES; + int ret_from_reset = 0; + u32 slave_read; + u32 cmd_channel_ver; + + 
down(&priv->cmd.slave_sem); + priv->cmd.max_cmds = 1; + mlx4_warn(dev, "Sending reset\n"); + ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, + MLX4_COMM_TIME); + /* if we are in the middle of flr the slave will try + * NUM_OF_RESET_RETRIES times before leaving.*/ + if (ret_from_reset) { + if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { + msleep(SLEEP_TIME_IN_RESET); + while (ret_from_reset && num_of_reset_retries) { + mlx4_warn(dev, "slave is currently in the" + "middle of FLR. retrying..." + "(try num:%d)\n", + (NUM_OF_RESET_RETRIES - + num_of_reset_retries + 1)); + ret_from_reset = + mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, + 0, MLX4_COMM_TIME); + num_of_reset_retries = num_of_reset_retries - 1; + } + } else + goto err; + } + + /* check the driver version - the slave I/F revision + * must match the master's */ + slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); + cmd_channel_ver = mlx4_comm_get_version(); + + if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != + MLX4_COMM_GET_IF_REV(slave_read)) { + mlx4_err(dev, "slave driver version is not supported" + " by the master\n"); + goto err; + } + + mlx4_warn(dev, "Sending vhcr0\n"); + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, + MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, + MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, + MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) + goto err; + up(&priv->cmd.slave_sem); + return 0; + +err: + mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); + up(&priv->cmd.slave_sem); + return -EIO; } static int mlx4_init_hca(struct mlx4_dev *dev) @@ -768,56 +1095,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev) u64 icm_size; int err; - err = mlx4_QUERY_FW(dev); - if (err) { - if (err == -EACCES) - mlx4_info(dev, "non-primary physical function, skipping.\n"); - else - mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); - return err; - } + if (!mlx4_is_slave(dev)) { + err = mlx4_QUERY_FW(dev); + if (err) { + if (err == -EACCES) + mlx4_info(dev, "non-primary physical function, skipping.\n"); + else + mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + goto unmap_bf; + } - err = mlx4_load_fw(dev); - if (err) { - mlx4_err(dev, "Failed to start FW, aborting.\n"); - return err; - } + err = mlx4_load_fw(dev); + if (err) { + mlx4_err(dev, "Failed to start FW, aborting.\n"); + goto unmap_bf; + } - mlx4_cfg.log_pg_sz_m = 1; - mlx4_cfg.log_pg_sz = 0; - err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); - if (err) - mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); + mlx4_cfg.log_pg_sz_m = 1; + mlx4_cfg.log_pg_sz = 0; + err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); + if (err) + mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); - err = mlx4_dev_cap(dev, &dev_cap); - if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); - goto err_stop_fw; - } + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_stop_fw; + } - profile = default_profile; + profile = default_profile; - icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); - if ((long long) icm_size < 0) { - err = icm_size; - goto err_stop_fw; - } + icm_size = mlx4_make_profile(dev, &profile, &dev_cap, + &init_hca); + if ((long long) icm_size < 0) { + err = icm_size; + goto err_stop_fw; + } - if (map_bf_area(dev)) - mlx4_dbg(dev, "Failed to map blue flame area\n"); + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + 
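mlx4_init_slave above hands the 64-bit DMA address of the slave's VHCR to the master over the communication channel in 16-bit slices: VHCR0 carries bits 63:48, VHCR1 bits 47:32, VHCR2 bits 31:16, and the final VHCR_EN message carries the low bits and arms the channel. A stand-alone sketch of that slicing (the address is invented):

/* Sketch only: split a 64-bit VHCR DMA address into the 16-bit chunks
 * pushed through the comm channel (VHCR0, VHCR1, VHCR2, then VHCR_EN). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dma = 0x0000123456789000ULL;   /* invented DMA address */

    uint16_t vhcr0   = (uint16_t)(dma >> 48);
    uint16_t vhcr1   = (uint16_t)(dma >> 32);
    uint16_t vhcr2   = (uint16_t)(dma >> 16);
    uint16_t vhcr_en = (uint16_t)dma;        /* low bits sent with the enable */

    printf("VHCR0=%04x VHCR1=%04x VHCR2=%04x EN=%04x\n",
           vhcr0, vhcr1, vhcr2, vhcr_en);
    return 0;
}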
init_hca.uar_page_sz = PAGE_SHIFT - 12; - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + if (err) + goto err_stop_fw; - err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); - if (err) - goto err_stop_fw; + err = mlx4_INIT_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); + goto err_free_icm; + } + } else { + err = mlx4_init_slave(dev); + if (err) { + mlx4_err(dev, "Failed to initialize slave\n"); + goto unmap_bf; + } - err = mlx4_INIT_HCA(dev, &init_hca); - if (err) { - mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); - goto err_free_icm; + err = mlx4_slave_cap(dev); + if (err) { + mlx4_err(dev, "Failed to obtain slave caps\n"); + goto err_close; + } } + if (map_bf_area(dev)) + mlx4_dbg(dev, "Failed to map blue flame area\n"); + + /*Only the master set the ports, all the rest got it from it.*/ + if (!mlx4_is_slave(dev)) + mlx4_set_port_mask(dev); + err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); @@ -830,16 +1177,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev) return 0; err_close: - mlx4_CLOSE_HCA(dev, 0); + mlx4_close_hca(dev); err_free_icm: - mlx4_free_icms(dev); + if (!mlx4_is_slave(dev)) + mlx4_free_icms(dev); err_stop_fw: + if (!mlx4_is_slave(dev)) { + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, priv->fw.fw_icm, 0); + } +unmap_bf: unmap_bf_area(dev); - mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, priv->fw.fw_icm, 0); - return err; } @@ -986,55 +1336,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) goto err_srq_table_free; } - err = mlx4_init_mcg_table(dev); - if (err) { - mlx4_err(dev, "Failed to initialize " - "multicast group table, aborting.\n"); - goto err_qp_table_free; + if (!mlx4_is_slave(dev)) { + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "multicast group table, aborting.\n"); + goto err_qp_table_free; + } } err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); - goto err_counters_table_free; + goto err_mcg_table_free; } - for (port = 1; port <= dev->caps.num_ports; port++) { - enum mlx4_port_type port_type = 0; - mlx4_SENSE_PORT(dev, port, &port_type); - if (port_type) - dev->caps.port_type[port] = port_type; - ib_port_default_caps = 0; - err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); - if (err) - mlx4_warn(dev, "failed to get port %d default " - "ib capabilities (%d). Continuing with " - "caps = 0\n", port, err); - dev->caps.ib_port_def_cap[port] = ib_port_default_caps; - - err = mlx4_check_ext_port_caps(dev, port); - if (err) - mlx4_warn(dev, "failed to get port %d extended " - "port capabilities support info (%d)." - " Assuming not supported\n", port, err); + if (!mlx4_is_slave(dev)) { + for (port = 1; port <= dev->caps.num_ports; port++) { + ib_port_default_caps = 0; + err = mlx4_get_port_ib_caps(dev, port, + &ib_port_default_caps); + if (err) + mlx4_warn(dev, "failed to get port %d default " + "ib capabilities (%d). Continuing " + "with caps = 0\n", port, err); + dev->caps.ib_port_def_cap[port] = ib_port_default_caps; + + err = mlx4_check_ext_port_caps(dev, port); + if (err) + mlx4_warn(dev, "failed to get port %d extended " + "port capabilities support info (%d)." 
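The UAR page size travels as a log of 4 KB chunks: the master writes PAGE_SHIFT - 12 into INIT_HCA, and a slave reconstructs the byte size from QUERY_HCA as 1 << (uar_page_sz + 12), as seen earlier in mlx4_slave_cap. A quick round-trip check, assuming 4 KB kernel pages:

/* Sketch only: round-trip of the UAR page-size encoding (log of 4K chunks). */
#include <stdio.h>

int main(void)
{
    int page_shift = 12;                              /* assume 4 KB pages */
    int encoded = page_shift - 12;                    /* value written to INIT_HCA */
    unsigned long decoded = 1UL << (encoded + 12);    /* what a slave reads back */

    printf("encoded=%d decoded=%lu bytes\n", encoded, decoded);
    return 0;
}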
+ " Assuming not supported\n", + port, err); - err = mlx4_SET_PORT(dev, port); - if (err) { - mlx4_err(dev, "Failed to set port %d, aborting\n", - port); - goto err_mcg_table_free; + err = mlx4_SET_PORT(dev, port); + if (err) { + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); + goto err_counters_table_free; + } } } - mlx4_set_port_mask(dev); return 0; -err_mcg_table_free: - mlx4_cleanup_mcg_table(dev); - err_counters_table_free: mlx4_cleanup_counters_table(dev); +err_mcg_table_free: + mlx4_cleanup_mcg_table(dev); + err_qp_table_free: mlx4_cleanup_qp_table(dev); @@ -1081,8 +1432,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) int i; if (msi_x) { - nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, - nreq); + /* In multifunction mode each function gets 2 msi-X vectors + * one for data path completions anf the other for asynch events + * or command completions */ + if (mlx4_is_mfunc(dev)) { + nreq = 2; + } else { + nreq = min_t(int, dev->caps.num_eqs - + dev->caps.reserved_eqs, nreq); + } + entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) goto no_msi; @@ -1138,16 +1497,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->dev = dev; info->port = port; - mlx4_init_mac_table(dev, &info->mac_table); - mlx4_init_vlan_table(dev, &info->vlan_table); - info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + + if (!mlx4_is_slave(dev)) { + INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); + mlx4_init_mac_table(dev, &info->mac_table); + mlx4_init_vlan_table(dev, &info->vlan_table); + info->base_qpn = + dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + (port - 1) * (1 << log_num_mac); + } sprintf(info->dev_name, "mlx4_port%d", port); info->port_attr.attr.name = info->dev_name; - info->port_attr.attr.mode = S_IRUGO | S_IWUSR; + if (mlx4_is_mfunc(dev)) + info->port_attr.attr.mode = S_IRUGO; + else { + info->port_attr.attr.mode = S_IRUGO | S_IWUSR; + info->port_attr.store = set_port_type; + } info->port_attr.show = show_port_type; - info->port_attr.store = set_port_type; sysfs_attr_init(&info->port_attr.attr); err = device_create_file(&dev->pdev->dev, &info->port_attr); @@ -1220,6 +1587,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev) kfree(priv->steer); } +static int extended_func_num(struct pci_dev *pdev) +{ + return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); +} + +#define MLX4_OWNER_BASE 0x8069c +#define MLX4_OWNER_SIZE 4 + +static int mlx4_get_ownership(struct mlx4_dev *dev) +{ + void __iomem *owner; + u32 ret; + + owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, + MLX4_OWNER_SIZE); + if (!owner) { + mlx4_err(dev, "Failed to obtain ownership bit\n"); + return -ENOMEM; + } + + ret = readl(owner); + iounmap(owner); + return (int) !!ret; +} + +static void mlx4_free_ownership(struct mlx4_dev *dev) +{ + void __iomem *owner; + + owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, + MLX4_OWNER_SIZE); + if (!owner) { + mlx4_err(dev, "Failed to obtain ownership bit\n"); + return; + } + writel(0, owner); + msleep(1000); + iounmap(owner); +} + static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx4_priv *priv; @@ -1235,13 +1642,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) "aborting.\n"); return err; } - + if (num_vfs > MLX4_MAX_NUM_VF) { + printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", + num_vfs, MLX4_MAX_NUM_VF); + return -EINVAL; + } /* - * Check for BARs. 
We expect 0: 1MB + * Check for BARs. */ - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || - pci_resource_len(pdev, 0) != 1 << 20) { - dev_err(&pdev->dev, "Missing DCS, aborting.\n"); + if (((id == NULL) || !(id->driver_data & MLX4_VF)) && + !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing DCS, aborting." + "(id == 0X%p, id->driver_data: 0x%lx," + " pci_resource_flags(pdev, 0):0x%lx)\n", id, + id ? id->driver_data : 0, pci_resource_flags(pdev, 0)); err = -ENODEV; goto err_disable_pdev; } @@ -1305,42 +1719,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&priv->bf_mutex); dev->rev_id = pdev->revision; + /* Detect if this device is a virtual function */ + if (id && id->driver_data & MLX4_VF) { + /* When acting as pf, we normally skip vfs unless explicitly + * requested to probe them. */ + if (num_vfs && extended_func_num(pdev) > probe_vf) { + mlx4_warn(dev, "Skipping virtual function:%d\n", + extended_func_num(pdev)); + err = -ENODEV; + goto err_free_dev; + } + mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); + dev->flags |= MLX4_FLAG_SLAVE; + } else { + /* We reset the device and enable SRIOV only for physical + * devices. Try to claim ownership on the device; + * if already taken, skip -- do not allow multiple PFs */ + err = mlx4_get_ownership(dev); + if (err) { + if (err < 0) + goto err_free_dev; + else { + mlx4_warn(dev, "Multiple PFs not yet supported." + " Skipping PF.\n"); + err = -EINVAL; + goto err_free_dev; + } + } - /* - * Now reset the HCA before we touch the PCI capabilities or - * attempt a firmware command, since a boot ROM may have left - * the HCA in an undefined state. - */ - err = mlx4_reset(dev); - if (err) { - mlx4_err(dev, "Failed to reset HCA, aborting.\n"); - goto err_free_dev; + if (num_vfs) { + mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs); + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + mlx4_err(dev, "Failed to enable sriov," + "continuing without sriov enabled" + " (err = %d).\n", err); + num_vfs = 0; + err = 0; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev->flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev->num_vfs = num_vfs; + } + } + + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. + */ + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_rel_own; + } } +slave_start: if (mlx4_cmd_init(dev)) { mlx4_err(dev, "Failed to init command interface, aborting.\n"); - goto err_free_dev; + goto err_sriov; + } + + /* In slave functions, the communication channel must be initialized + * before posting commands. 
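extended_func_num above flattens a PCI devfn into slot * 8 + function; just below, the probe path uses it to skip virtual functions whose extended number exceeds probe_vf when SR-IOV is requested (num_vfs > 0). A minimal reproduction of that computation and the skip decision (the devfn value is invented):

/* Sketch only: extended function number from a PCI devfn (slot*8 + function)
 * and the skip decision used when probe_vf limits PF-side VF probing. */
#include <stdio.h>

#define SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
    unsigned int devfn = (0 << 3) | 2;   /* slot 0, function 2 (a VF) */
    int num_vfs = 4, probe_vf = 1;
    int ext = SLOT(devfn) * 8 + FUNC(devfn);

    if (num_vfs && ext > probe_vf)
        printf("skipping virtual function %d\n", ext);
    else
        printf("probing function %d\n", ext);
    return 0;
}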
Also, init num_slaves before calling + * mlx4_init_hca */ + if (mlx4_is_mfunc(dev)) { + if (mlx4_is_master(dev)) + dev->num_slaves = MLX4_MAX_NUM_SLAVES; + else { + dev->num_slaves = 0; + if (mlx4_multi_func_init(dev)) { + mlx4_err(dev, "Failed to init slave mfunc" + " interface, aborting.\n"); + goto err_cmd; + } + } } err = mlx4_init_hca(dev); - if (err) - goto err_cmd; + if (err) { + if (err == -EACCES) { + /* Not primary Physical function + * Running in slave mode */ + mlx4_cmd_cleanup(dev); + dev->flags |= MLX4_FLAG_SLAVE; + dev->flags &= ~MLX4_FLAG_MASTER; + goto slave_start; + } else + goto err_mfunc; + } + + /* In master functions, the communication channel must be initialized + * after obtaining its address from fw */ + if (mlx4_is_master(dev)) { + if (mlx4_multi_func_init(dev)) { + mlx4_err(dev, "Failed to init master mfunc" + "interface, aborting.\n"); + goto err_close; + } + } err = mlx4_alloc_eq_table(dev); if (err) - goto err_close; + goto err_master_mfunc; priv->msix_ctl.pool_bm = 0; spin_lock_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); - - err = mlx4_init_steering(dev); - if (err) + if ((mlx4_is_mfunc(dev)) && + !(dev->flags & MLX4_FLAG_MSI_X)) { + mlx4_err(dev, "INTx is not supported in multi-function mode." + " aborting.\n"); goto err_free_eq; + } + + if (!mlx4_is_slave(dev)) { + err = mlx4_init_steering(dev); + if (err) + goto err_free_eq; + } err = mlx4_setup_hca(dev); - if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { + if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && + !mlx4_is_mfunc(dev)) { dev->flags &= ~MLX4_FLAG_MSI_X; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); @@ -1383,20 +1887,37 @@ err_port: mlx4_cleanup_uar_table(dev); err_steer: - mlx4_clear_steering(dev); + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); err_free_eq: mlx4_free_eq_table(dev); +err_master_mfunc: + if (mlx4_is_master(dev)) + mlx4_multi_func_cleanup(dev); + err_close: if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); mlx4_close_hca(dev); +err_mfunc: + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); + err_cmd: mlx4_cmd_cleanup(dev); +err_sriov: + if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) + pci_disable_sriov(pdev); + +err_rel_own: + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + err_free_dev: kfree(priv); @@ -1424,6 +1945,12 @@ static void mlx4_remove_one(struct pci_dev *pdev) int p; if (dev) { + /* in SRIOV it is not allowed to unload the pf's + * driver while there are alive vf's */ + if (mlx4_is_master(dev)) { + if (mlx4_how_many_lives_vf(dev)) + printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); + } mlx4_stop_sense(dev); mlx4_unregister_device(dev); @@ -1443,17 +1970,31 @@ static void mlx4_remove_one(struct pci_dev *pdev) mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev); + iounmap(priv->kar); mlx4_uar_free(dev, &priv->driver_uar); mlx4_cleanup_uar_table(dev); - mlx4_clear_steering(dev); + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); mlx4_free_eq_table(dev); + if (mlx4_is_master(dev)) + mlx4_multi_func_cleanup(dev); mlx4_close_hca(dev); + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); mlx4_cmd_cleanup(dev); if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); + if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) { + mlx4_warn(dev, "Disabling sriov\n"); + pci_disable_sriov(pdev); + } + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); kfree(priv); pci_release_regions(pdev); pci_disable_device(pdev); @@ -1468,33 +2009,48 @@ 
int mlx4_restart_one(struct pci_dev *pdev) } static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { - { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ - { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ - { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ - { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ - { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ - { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */ - { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */ - { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ - { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ - { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ - { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ - { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ - { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ - { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ - { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ - { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ - { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ - { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ - { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ - { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ + /* MT25408 "Hermon" SDR */ + { PCI_VDEVICE(MELLANOX, 0x6340), 0 }, + /* MT25408 "Hermon" DDR */ + { PCI_VDEVICE(MELLANOX, 0x634a), 0 }, + /* MT25408 "Hermon" QDR */ + { PCI_VDEVICE(MELLANOX, 0x6354), 0 }, + /* MT25408 "Hermon" DDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6732), 0 }, + /* MT25408 "Hermon" QDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x673c), 0 }, + /* MT25408 "Hermon" EN 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6368), 0 }, + /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6750), 0 }, + /* MT25458 ConnectX EN 10GBASE-T 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6372), 0 }, + /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x675a), 0 }, + /* MT26468 ConnectX EN 10GigE PCIe gen2*/ + { PCI_VDEVICE(MELLANOX, 0x6764), 0 }, + /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ + { PCI_VDEVICE(MELLANOX, 0x6746), 0 }, + /* MT26478 ConnectX2 40GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x676e), 0 }, + /* MT25400 Family [ConnectX-2 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF }, + /* MT27500 Family [ConnectX-3] */ + { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, + /* MT27500 Family [ConnectX-3 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF }, + { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ + { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ + { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ + { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ + { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ + { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ + { 
PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ + { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ + { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ + { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ + { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ + { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ { 0, } }; @@ -1523,6 +2079,12 @@ static int __init mlx4_verify_params(void) return -1; } + /* Check if module param for ports type has legal combination */ + if (port_type_array[0] == false && port_type_array[1] == true) { + printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); + port_type_array[0] = true; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 978688c3104..0785d9b2a26 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -44,28 +44,47 @@ static const u8 zero_gid[16]; /* automatically initialized to 0 */ +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_MAX_QP_PER_MGM]; +}; + +int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) +{ + return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE); +} + +int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) +{ + return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); +} + static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } -static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer, +static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, struct mlx4_cmd_mailbox *mailbox) { u32 in_mod; - in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1; + in_mod = (u32) port << 16 | steer << 1; return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, - MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); } static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int err; err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, - MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); if (!err) *hash = imm; @@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, * Add new entry to steering data structure. * All promisc QPs should be added as well */ -static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, +static int new_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { @@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, struct mlx4_promisc_qp *dqp = NULL; u32 prot; int err; - u8 pf_num; - pf_num = (dev->caps.num_ports == 1) ? 
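
The new mlx4_get_mgm_entry_size()/mlx4_get_qp_per_mgm() helpers above derive the multicast group (MGM) geometry from the mlx4_log_num_mgm_entry_size module parameter instead of the old fixed MLX4_MGM_ENTRY_SIZE. The 32-byte MGM header (next_gid_index, members_count, two reserved words and the 16-byte GID) occupies two 16-byte lines, which is what the "- 2" accounts for; every remaining 16-byte line holds four 4-byte QP entries, hence the "* 4". A standalone sketch of that arithmetic, with example log sizes chosen purely for illustration (not the driver's defaults):

    /* Standalone sketch (not from the patch): the arithmetic behind
     * mlx4_get_qp_per_mgm() for a few example entry sizes. */
    #include <stdio.h>

    #define MAX_MGM_ENTRY_SIZE 0x1000          /* MLX4_MAX_MGM_ENTRY_SIZE */

    static int mgm_entry_size(int log_num_mgm_entry_size)
    {
        int size = 1 << log_num_mgm_entry_size;
        return size < MAX_MGM_ENTRY_SIZE ? size : MAX_MGM_ENTRY_SIZE;
    }

    static int qp_per_mgm(int log_num_mgm_entry_size)
    {
        /* two 16-byte header lines, then four QPs per 16-byte line */
        return 4 * (mgm_entry_size(log_num_mgm_entry_size) / 16 - 2);
    }

    int main(void)
    {
        int log;

        for (log = 8; log <= 12; log++)        /* 256B .. 4KB entries */
            printf("log=%d entry=%dB qp_per_mgm=%d\n",
                   log, mgm_entry_size(log), qp_per_mgm(log));
        return 0;
    }
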
vep_num : (vep_num << 1) | (port - 1); - s_steer = &mlx4_priv(dev)->steer[pf_num]; + s_steer = &mlx4_priv(dev)->steer[0]; new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); if (!new_entry) return -ENOMEM; @@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, /* If the given qpn is also a promisc qp, * it should be inserted to duplicates list */ - pqp = get_promisc_qp(dev, pf_num, steer, qpn); + pqp = get_promisc_qp(dev, 0, steer, qpn); if (pqp) { dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) { @@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, /* don't add already existing qpn */ if (pqp->qpn == qpn) continue; - if (members_count == MLX4_QP_PER_MGM) { + if (members_count == dev->caps.num_qp_per_mgm) { /* out of space */ err = -ENOMEM; goto out_mailbox; @@ -193,7 +211,7 @@ out_alloc: } /* update the data structures with existing steering entry */ -static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, +static int existing_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { @@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp; - u8 pf_num; - pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); - s_steer = &mlx4_priv(dev)->steer[pf_num]; + s_steer = &mlx4_priv(dev)->steer[0]; - pqp = get_promisc_qp(dev, pf_num, steer, qpn); + pqp = get_promisc_qp(dev, 0, steer, qpn); if (!pqp) return 0; /* nothing to do */ @@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, * we need to add it as a duplicate to this entry * for future references */ list_for_each_entry(dqp, &entry->duplicates, list) { - if (qpn == dqp->qpn) + if (qpn == pqp->qpn) return 0; /* qp is already duplicated */ } @@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, /* Check whether a qpn is a duplicate on steering entry * If so, it should not be removed from mgm */ -static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, +static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { struct mlx4_steer *s_steer; struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *dqp, *tmp_dqp; - u8 pf_num; - pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); - s_steer = &mlx4_priv(dev)->steer[pf_num]; + s_steer = &mlx4_priv(dev)->steer[0]; /* if qp is not promisc, it cannot be duplicated */ - if (!get_promisc_qp(dev, pf_num, steer, qpn)) + if (!get_promisc_qp(dev, 0, steer, qpn)) return false; /* The qp is promisc qp so it is a duplicate on this index @@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, } /* I a steering entry contains only promisc QPs, it can be removed. */ -static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, +static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 tqpn) { @@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, u32 members_count; bool ret = false; int i; - u8 pf_num; - pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); - s_steer = &mlx4_priv(dev)->steer[pf_num]; + s_steer = &mlx4_priv(dev)->steer[0]; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) @@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; i++) { qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; - if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) { + if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) { /* the qp is not promisc, the entry can't be removed */ goto out; } @@ -332,7 +344,7 @@ out: return ret; } -static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, +static int add_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_steer *s_steer; @@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, bool found; int last_index; int err; - u8 pf_num; struct mlx4_priv *priv = mlx4_priv(dev); - pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); - s_steer = &mlx4_priv(dev)->steer[pf_num]; + + s_steer = &mlx4_priv(dev)->steer[0]; mutex_lock(&priv->mcg_table.mutex); - if (get_promisc_qp(dev, pf_num, steer, qpn)) { + if (get_promisc_qp(dev, 0, steer, qpn)) { err = 0; /* Noting to do, already exists */ goto out_mutex; } @@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, } if (!found) { /* Need to add the qpn to mgm */ - if (members_count == MLX4_QP_PER_MGM) { + if (members_count == dev->caps.num_qp_per_mgm) { /* entry is full */ err = -ENOMEM; goto out_mailbox; @@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); - err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); + err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); if (err) goto out_list; @@ -439,7 +450,7 @@ out_mutex: return err; } -static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, +static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, bool back_to_list = false; int loc, i; int err; - u8 pf_num; - pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); - s_steer = &mlx4_priv(dev)->steer[pf_num]; + s_steer = &mlx4_priv(dev)->steer[0]; mutex_lock(&priv->mcg_table.mutex); - pqp = get_promisc_qp(dev, pf_num, steer, qpn); + pqp = get_promisc_qp(dev, 0, steer, qpn); if (unlikely(!pqp)) { mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); /* nothing to do */ @@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, goto out_list; } mgm = mailbox->buf; + memset(mgm, 0, sizeof *mgm); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); - err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); + err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); if (err) goto out_mailbox; @@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], } index += dev->caps.num_mgms; + new_entry = 1; memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid, 16); } members_count = be32_to_cpu(mgm->members_count) & 0xffffff; - if (members_count == MLX4_QP_PER_MGM) { + if (members_count == dev->caps.num_qp_per_mgm) { mlx4_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; @@ -696,9 +707,9 @@ out: if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, 0, port, steer, index, qp->qpn); + new_steering_entry(dev, port, steer, index, qp->qpn); else - existing_steering_entry(dev, 0, port, steer, + existing_steering_entry(dev, port, steer, index, qp->qpn); } if (err && link && index != -1) { @@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], /* if this pq is also a promisc qp, it shouldn't be removed */ if (prot == MLX4_PROT_ETH && - check_duplicate_entry(dev, 0, port, steer, index, qp->qpn)) + check_duplicate_entry(dev, port, steer, index, qp->qpn)) goto out; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; @@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mgm->qp[i - 1] = 0; if (prot == MLX4_PROT_ETH) - removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn); + removed_entry = can_remove_steering_entry(dev, port, steer, + index, qp->qpn); if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { err = mlx4_WRITE_ENTRY(dev, index, mailbox); goto out; @@ -828,6 +840,34 @@ out: return err; } +static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 attach, u8 block_loopback, + enum mlx4_protocol prot) +{ + struct mlx4_cmd_mailbox *mailbox; + int err = 0; + int qpn; + + if (!mlx4_is_mfunc(dev)) + return -EBADF; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, gid, 16); + qpn = qp->qpn; + qpn |= (prot << 28); + if (attach && block_loopback) + qpn |= (1 << 31); + + err = mlx4_cmd(dev, mailbox->dma, qpn, attach, + MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) @@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], if (prot == MLX4_PROT_ETH) gid[7] |= (steer << 1); - return mlx4_qp_attach_common(dev, qp, gid, - block_mcast_loopback, prot, - steer); + 
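
In multi-function mode, attach/detach requests now go through the wrapped MLX4_CMD_QP_ATTACH command added above: the GID travels in the mailbox, while the QPN, the protocol and the block-loopback flag are folded into the 32-bit in_modifier, and the attach/detach choice is carried in op_modifier. A minimal standalone sketch of that in_modifier packing; the numeric value used for MLX4_PROT_ETH is an assumption made only for the example:

    /* Standalone sketch (not from the patch): how mlx4_QP_ATTACH() builds the
     * command's in_modifier from the QPN, protocol and loopback flag. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pack_attach_modifier(uint32_t qpn, uint32_t prot,
                                         int attach, int block_loopback)
    {
        uint32_t mod = qpn;        /* low bits: QP number */

        mod |= prot << 28;         /* protocol (MLX4_PROT_*) */
        if (attach && block_loopback)
            mod |= 1u << 31;       /* block multicast loopback */
        return mod;
    }

    int main(void)
    {
        /* QPN 0x4b, protocol 1 (assumed value of MLX4_PROT_ETH), attach with
         * loopback blocking: prints 0x9000004b */
        printf("in_modifier = 0x%08x\n",
               (unsigned)pack_attach_modifier(0x4b, 1, 1, 1));
        return 0;
    }
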
if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + + return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, + prot, steer); } EXPORT_SYMBOL_GPL(mlx4_multicast_attach); @@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - if (prot == MLX4_PROT_ETH) { + if (prot == MLX4_PROT_ETH) gid[7] |= (steer << 1); - } + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); return mlx4_qp_detach_common(dev, qp, gid, prot, steer); } EXPORT_SYMBOL_GPL(mlx4_multicast_detach); +int mlx4_unicast_attach(struct mlx4_dev *dev, + struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot) +{ + if (prot == MLX4_PROT_ETH && + !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) + return 0; + + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_UC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + + return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, + prot, MLX4_UC_STEER); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_attach); + +int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot) +{ + if (prot == MLX4_PROT_ETH && + !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) + return 0; + + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_UC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + + return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_detach); + +int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u32 qpn = (u32) vhcr->in_param & 0xffffffff; + u8 port = vhcr->in_param >> 62; + enum mlx4_steer_type steer = vhcr->in_modifier; + + /* Promiscuous unicast is not allowed in mfunc */ + if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) + return 0; + + if (vhcr->op_modifier) + return add_promisc_qp(dev, port, steer, qpn); + else + return remove_promisc_qp(dev, port, steer, qpn); +} + +static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, + enum mlx4_steer_type steer, u8 add, u8 port) +{ + return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, + MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); - return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); + return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); @@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); - return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); + return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); @@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); - return add_promisc_qp(dev, 
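
The same pattern governs promiscuous mode: in multi-function mode mlx4_multicast_promisc_add() and friends issue MLX4_CMD_PROMISC, and mlx4_PROMISC_wrapper() recovers the arguments on the other side. The 64-bit in_param is shared between the QPN (low 32 bits) and the port (bits 62-63), while the steering type rides in in_modifier and the add/remove flag in op_modifier. A standalone round-trip of just the in_param encoding:

    /* Standalone sketch (not from the patch): encode/decode of the in_param
     * used by mlx4_PROMISC() and mlx4_PROMISC_wrapper(). */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t promisc_in_param(uint32_t qpn, uint8_t port)
    {
        return (uint64_t)qpn | ((uint64_t)port << 62);
    }

    int main(void)
    {
        uint64_t in_param = promisc_in_param(0xabc, 2);

        /* wrapper side */
        uint32_t qpn  = (uint32_t)(in_param & 0xffffffff);
        uint8_t  port = (uint8_t)(in_param >> 62);

        assert(qpn == 0xabc && port == 2);
        printf("in_param=0x%016llx qpn=0x%x port=%u\n",
               (unsigned long long)in_param, qpn, port);
        return 0;
    }
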
0, port, MLX4_UC_STEER, qpn); + return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); @@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); + + return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 5dfa68ffc11..a80121a2b51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -46,21 +46,25 @@ #include <linux/mlx4/device.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/doorbell.h> +#include <linux/mlx4/cmd.h> #define DRV_NAME "mlx4_core" -#define DRV_VERSION "1.0" -#define DRV_RELDATE "July 14, 2011" +#define PFX DRV_NAME ": " +#define DRV_VERSION "1.1" +#define DRV_RELDATE "Dec, 2011" enum { MLX4_HCR_BASE = 0x80680, MLX4_HCR_SIZE = 0x0001c, - MLX4_CLR_INT_SIZE = 0x00008 + MLX4_CLR_INT_SIZE = 0x00008, + MLX4_SLAVE_COMM_BASE = 0x0, + MLX4_COMM_PAGESIZE = 0x1000 }; enum { - MLX4_MGM_ENTRY_SIZE = 0x100, - MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2), - MLX4_MTT_ENTRY_PER_SEG = 8 + MLX4_MAX_MGM_ENTRY_SIZE = 0x1000, + MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2), + MLX4_MTT_ENTRY_PER_SEG = 8, }; enum { @@ -80,6 +84,94 @@ enum { MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT }; +enum mlx4_mr_state { + MLX4_MR_DISABLED = 0, + MLX4_MR_EN_HW, + MLX4_MR_EN_SW +}; + +#define MLX4_COMM_TIME 10000 +enum { + MLX4_COMM_CMD_RESET, + MLX4_COMM_CMD_VHCR0, + MLX4_COMM_CMD_VHCR1, + MLX4_COMM_CMD_VHCR2, + MLX4_COMM_CMD_VHCR_EN, + MLX4_COMM_CMD_VHCR_POST, + MLX4_COMM_CMD_FLR = 254 +}; + +/*The flag indicates that the slave should delay the RESET cmd*/ +#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb +/*indicates how many retries will be done if we are in the middle of FLR*/ +#define NUM_OF_RESET_RETRIES 10 +#define SLEEP_TIME_IN_RESET (2 * 1000) +enum mlx4_resource { + RES_QP, + RES_CQ, + RES_SRQ, + RES_XRCD, + RES_MPT, + RES_MTT, + RES_MAC, + RES_VLAN, + RES_EQ, + RES_COUNTER, + MLX4_NUM_OF_RESOURCE_TYPE +}; + +enum mlx4_alloc_mode { + RES_OP_RESERVE, + RES_OP_RESERVE_AND_MAP, + RES_OP_MAP_ICM, +}; + + +/* + *Virtual HCR structures. + * mlx4_vhcr is the sw representation, in machine endianess + * + * mlx4_vhcr_cmd is the formalized structure, the one that is passed + * to FW to go through communication channel. 
+ * It is big endian, and has the same structure as the physical HCR + * used by command interface + */ +struct mlx4_vhcr { + u64 in_param; + u64 out_param; + u32 in_modifier; + u32 errno; + u16 op; + u16 token; + u8 op_modifier; + u8 e_bit; +}; + +struct mlx4_vhcr_cmd { + __be64 in_param; + __be32 in_modifier; + __be64 out_param; + __be16 token; + u16 reserved; + u8 status; + u8 flags; + __be16 opcode; +}; + +struct mlx4_cmd_info { + u16 opcode; + bool has_inbox; + bool has_outbox; + bool out_is_imm; + bool encode_slave_id; + int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox); + int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +}; + #ifdef CONFIG_MLX4_DEBUG extern int mlx4_debug_level; #else /* CONFIG_MLX4_DEBUG */ @@ -99,6 +191,12 @@ do { \ #define mlx4_warn(mdev, format, arg...) \ dev_warn(&mdev->pdev->dev, format, ##arg) +extern int mlx4_log_num_mgm_entry_size; +extern int log_mtts_per_seg; + +#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) +#define ALL_SLAVES 0xff + struct mlx4_bitmap { u32 last; u32 top; @@ -130,6 +228,147 @@ struct mlx4_icm_table { struct mlx4_icm **icm; }; +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +struct mlx4_mpt_entry { + __be32 flags; + __be32 qpn; + __be32 key; + __be32 pd_flags; + __be64 start; + __be64 length; + __be32 lkey; + __be32 win_cnt; + u8 reserved1[3]; + u8 mtt_rep; + __be64 mtt_addr; + __be32 mtt_sz; + __be32 entity_size; + __be32 first_byte_offset; +} __packed; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. + */ +struct mlx4_eq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + u8 log_eq_size; + u8 reserved2[4]; + u8 eq_period; + u8 reserved3; + u8 eq_max_count; + u8 reserved4[3]; + u8 intr; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved6[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved7[4]; +}; + +struct mlx4_cq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + __be32 logsize_usrpage; + __be16 cq_period; + __be16 cq_max_count; + u8 reserved2[3]; + u8 comp_eqn; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + u32 reserved4[2]; + __be64 db_rec_addr; +}; + +struct mlx4_srq_context { + __be32 state_logsize_srqn; + u8 logstride; + u8 reserved1; + __be16 xrcd; + __be32 pg_offset_cqn; + u32 reserved2; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved4; + __be16 wqe_counter; + u32 reserved5; + __be64 db_rec_addr; +}; + +struct mlx4_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __packed comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __packed cmd; + struct { + __be32 qpn; + } __packed qp; + struct { + __be32 srqn; + } __packed srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __packed cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __packed port_change; + struct { + #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 + u32 reserved; + u32 
bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; + } __packed comm_channel_arm; + struct { + u8 port; + u8 reserved[3]; + __be64 mac; + } __packed mac_update; + struct { + u8 port; + } __packed sw_event; + struct { + __be32 slave_id; + } __packed flr_event; + } event; + u8 slave_id; + u8 reserved3[2]; + u8 owner; +} __packed; + struct mlx4_eq { struct mlx4_dev *dev; void __iomem *doorbell; @@ -142,6 +381,18 @@ struct mlx4_eq { struct mlx4_mtt mtt; }; +struct mlx4_slave_eqe { + u8 type; + u8 port; + u32 param; +}; + +struct mlx4_slave_event_eq_info { + u32 eqn; + u16 token; + u64 event_type; +}; + struct mlx4_profile { int num_qp; int rdmarc_per_qp; @@ -155,16 +406,37 @@ struct mlx4_profile { struct mlx4_fw { u64 clr_int_base; u64 catas_offset; + u64 comm_base; struct mlx4_icm *fw_icm; struct mlx4_icm *aux_icm; u32 catas_size; u16 fw_pages; u8 clr_int_bar; u8 catas_bar; + u8 comm_bar; }; -#define MGM_QPN_MASK 0x00FFFFFF -#define MGM_BLCK_LB_BIT 30 +struct mlx4_comm { + u32 slave_write; + u32 slave_read; +}; + +enum { + MLX4_MCAST_CONFIG = 0, + MLX4_MCAST_DISABLE = 1, + MLX4_MCAST_ENABLE = 2, +}; + +#define VLAN_FLTR_SIZE 128 + +struct mlx4_vlan_fltr { + __be32 entry[VLAN_FLTR_SIZE]; +}; + +struct mlx4_mcast_entry { + struct list_head list; + u64 addr; +}; struct mlx4_promisc_qp { struct list_head list; @@ -177,19 +449,87 @@ struct mlx4_steer_index { struct list_head duplicates; }; -struct mlx4_mgm { - __be32 next_gid_index; - __be32 members_count; - u32 reserved[2]; - u8 gid[16]; - __be32 qp[MLX4_QP_PER_MGM]; +struct mlx4_slave_state { + u8 comm_toggle; + u8 last_cmd; + u8 init_port_mask; + bool active; + u8 function; + dma_addr_t vhcr_dma; + u16 mtu[MLX4_MAX_PORTS + 1]; + __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; + struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; + struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; + struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; + struct mlx4_slave_event_eq_info event_eq; + u16 eq_pi; + u16 eq_ci; + spinlock_t lock; + /*initialized via the kzalloc*/ + u8 is_slave_going_down; + u32 cookie; +}; + +struct slave_list { + struct mutex mutex; + struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; +}; + +struct mlx4_resource_tracker { + spinlock_t lock; + /* tree for each resources */ + struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; + /* num_of_slave's lists, one per slave */ + struct slave_list *slave_list; +}; + +#define SLAVE_EVENT_EQ_SIZE 128 +struct mlx4_slave_event_eq { + u32 eqn; + u32 cons; + u32 prod; + struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; +}; + +struct mlx4_master_qp0_state { + int proxy_qp0_active; + int qp0_active; + int port_active; +}; + +struct mlx4_mfunc_master_ctx { + struct mlx4_slave_state *slave_state; + struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; + int init_port_ref[MLX4_MAX_PORTS + 1]; + u16 max_mtu[MLX4_MAX_PORTS + 1]; + int disable_mcast_ref[MLX4_MAX_PORTS + 1]; + struct mlx4_resource_tracker res_tracker; + struct workqueue_struct *comm_wq; + struct work_struct comm_work; + struct work_struct slave_event_work; + struct work_struct slave_flr_event_work; + spinlock_t slave_state_lock; + __be32 comm_arm_bit_vector[4]; + struct mlx4_eqe cmd_eqe; + struct mlx4_slave_event_eq slave_eq; + struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; +}; + +struct mlx4_mfunc { + struct mlx4_comm __iomem *comm; + struct mlx4_vhcr_cmd *vhcr; + dma_addr_t vhcr_dma; + + struct mlx4_mfunc_master_ctx master; }; + struct mlx4_cmd { struct pci_pool *pool; void __iomem *hcr; struct mutex hcr_mutex; struct semaphore poll_sem; struct 
semaphore event_sem; + struct semaphore slave_sem; int max_cmds; spinlock_t context_lock; int free_head; @@ -197,6 +537,7 @@ struct mlx4_cmd { u16 token_mask; u8 use_events; u8 toggle; + u8 comm_toggle; }; struct mlx4_uar_table { @@ -287,6 +628,48 @@ struct mlx4_vlan_table { int max; }; +#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_PROMISC_SHIFT 31 +#define SET_PORT_MC_PROMISC_SHIFT 30 + +enum { + MCAST_DIRECT_ONLY = 0, + MCAST_DIRECT = 1, + MCAST_DEFAULT = 2 +}; + + +struct mlx4_set_port_general_context { + u8 reserved[3]; + u8 flags; + u16 reserved2; + __be16 mtu; + u8 pptx; + u8 pfctx; + u16 reserved3; + u8 pprx; + u8 pfcrx; + u16 reserved4; +}; + +struct mlx4_set_port_rqp_calc_context { + __be32 base_qpn; + u8 rererved; + u8 n_mac; + u8 n_vlan; + u8 n_prio; + u8 reserved2[3]; + u8 mac_miss; + u8 intra_no_vlan; + u8 no_vlan; + u8 intra_vlan_miss; + u8 vlan_miss; + u8 reserved3[3]; + u8 no_vlan_prio; + __be32 promisc; + __be32 mcast; +}; + struct mlx4_mac_entry { u64 mac; }; @@ -333,6 +716,7 @@ struct mlx4_priv { struct mlx4_fw fw; struct mlx4_cmd cmd; + struct mlx4_mfunc mfunc; struct mlx4_bitmap pd_bitmap; struct mlx4_bitmap xrcd_bitmap; @@ -359,6 +743,7 @@ struct mlx4_priv { struct list_head bf_list; struct mutex bf_mutex; struct io_mapping *bf_mapping; + int reserved_mtts; }; static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) @@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev); void mlx4_cleanup_qp_table(struct mlx4_dev *dev); void mlx4_cleanup_srq_table(struct mlx4_dev *dev); void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn); +void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); +int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); +void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); +int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); +void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); +int __mlx4_mr_reserve(struct mlx4_dev *dev); +void __mlx4_mr_release(struct mlx4_dev *dev, u32 index); +int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index); +void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index); +u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); +void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); + +int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct 
mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base); +void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); +int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); +void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); +int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); +int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list); void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); @@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, struct mlx4_profile *request, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *init_hca); +void mlx4_master_comm_channel(struct work_struct *work); +void mlx4_gen_slave_eqe(struct work_struct *work); +void mlx4_master_handle_slave_flr(struct work_struct *work); + +int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct 
mlx4_cmd_info *cmd); +int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); int mlx4_cmd_init(struct mlx4_dev *dev); void mlx4_cmd_cleanup(struct mlx4_dev *dev); +int mlx4_multi_func_init(struct mlx4_dev *dev); +void mlx4_multi_func_cleanup(struct mlx4_dev *dev); void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); int mlx4_cmd_use_events(struct mlx4_dev *dev); void mlx4_cmd_use_polling(struct mlx4_dev *dev); +int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, + unsigned long timeout); + void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); @@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); +/* resource tracker functions*/ +int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, + enum mlx4_resource resource_type, + int resource_id, int *slave); +void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); +int mlx4_init_resource_tracker(struct mlx4_dev *dev); + +void mlx4_free_resource_tracker(struct mlx4_dev *dev); + +int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port); + +int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot, enum mlx4_steer_type steer); int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer); +int 
mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, + int port, void *buf); +int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, + struct mlx4_cmd_mailbox *outbox); +int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); +int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); + +static inline void set_param_l(u64 *arg, u32 val) +{ + *((u32 *)arg) = val; +} + +static inline void set_param_h(u64 *arg, u32 val) +{ + *arg = (*arg & 0xffffffff) | ((u64) val << 32); +} + +static inline u32 get_param_l(u64 *arg) +{ + return (u32) (*arg & 0xffffffff); +} + +static inline u32 get_param_h(u64 *arg) +{ + return (u32)(*arg >> 32); +} + +static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) +{ + return &mlx4_priv(dev)->mfunc.master.res_tracker.lock; +} + +#define NOT_MASKED_PD_BITS 17 + #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 207b5add3ca..f2a8e65f5f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -51,8 +51,8 @@ #include "en_port.h" #define DRV_NAME "mlx4_en" -#define DRV_VERSION "1.5.4.2" -#define DRV_RELDATE "October 2011" +#define DRV_VERSION "2.0" +#define DRV_RELDATE "Dec 2011" #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) @@ -366,16 +366,6 @@ struct mlx4_en_rss_map { enum mlx4_qp_state indir_state; }; -struct mlx4_en_rss_context { - __be32 base_qpn; - __be32 default_qpn; - u16 reserved; - u8 hash_fn; - u8 flags; - __be32 rss_key[10]; - __be32 base_qpn_udp; -}; - struct mlx4_en_port_state { int link_state; int link_speed; @@ -463,6 +453,7 @@ struct mlx4_en_priv { int base_qpn; struct mlx4_en_rss_map rss_map; + u32 ctrl_flags; u32 flags; #define MLX4_EN_FLAG_PROMISC 0x1 #define MLX4_EN_FLAG_MC_PROMISC 0x2 @@ -495,9 +486,9 @@ struct mlx4_en_priv { enum mlx4_en_wol { MLX4_EN_WOL_MAGIC = (1ULL << 61), MLX4_EN_WOL_ENABLED = (1ULL << 62), - MLX4_EN_WOL_DO_MODIFY = (1ULL << 63), }; +#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) void mlx4_en_destroy_netdev(struct net_device *dev); int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index efa3e77355e..01df5567e16 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -32,35 +32,17 @@ * SOFTWARE. 
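
Wrapped resource commands treat the 64-bit in_param/out_param as two 32-bit halves, which is what the set_param_l/set_param_h/get_param_l/get_param_h helpers added above are for. One note: set_param_l() stores through a u32 pointer at the start of the u64, so it lands on the low half only on a little-endian host, whereas the other three helpers use endian-neutral shifts and masks. Below is a standalone sketch of the same packing written entirely with shifts; the helper names are mine, not the driver's:

    /* Standalone sketch (not from the patch): shift/mask equivalents of the
     * low/high 32-bit accessors used for wrapped command parameters. */
    #include <assert.h>
    #include <stdint.h>

    static void set_low(uint64_t *arg, uint32_t val)
    {
        *arg = (*arg & 0xffffffff00000000ull) | val;
    }

    static void set_high(uint64_t *arg, uint32_t val)
    {
        *arg = (*arg & 0x00000000ffffffffull) | ((uint64_t)val << 32);
    }

    int main(void)
    {
        uint64_t param = 0;

        set_low(&param, 0x1234);
        set_high(&param, 0xabcd);
        assert((uint32_t)(param & 0xffffffff) == 0x1234);  /* get_param_l */
        assert((uint32_t)(param >> 32)        == 0xabcd);  /* get_param_h */
        return 0;
    }
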
*/ +#include <linux/init.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/slab.h> +#include <linux/kernel.h> #include <linux/mlx4/cmd.h> #include "mlx4.h" #include "icm.h" -/* - * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. - */ -struct mlx4_mpt_entry { - __be32 flags; - __be32 qpn; - __be32 key; - __be32 pd_flags; - __be64 start; - __be64 length; - __be32 lkey; - __be32 win_cnt; - u8 reserved1[3]; - u8 mtt_rep; - __be64 mtt_seg; - __be32 mtt_sz; - __be32 entity_size; - __be32 first_byte_offset; -} __packed; - #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) #define MLX4_MPT_FLAG_FREE (0x3UL << 28) #define MLX4_MPT_FLAG_MIO (1 << 17) @@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) kfree(buddy->num_free); } -static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; u32 seg; + int seg_order; + u32 offset; - seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); + seg_order = max_t(int, order - log_mtts_per_seg, 0); + + seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); if (seg == -1) return -1; - if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, - seg + (1 << order) - 1)) { - mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); + offset = seg * (1 << log_mtts_per_seg); + + if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, + offset + (1 << order) - 1)) { + mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); return -1; } - return seg; + return offset; +} + +static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +{ + u64 in_param; + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, order); + err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (err) + return -1; + return get_param_l(&out_param); + } + return __mlx4_alloc_mtt_range(dev, order); } int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, @@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, } else mtt->page_shift = page_shift; - for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) + for (mtt->order = 0, i = 1; i < npages; i <<= 1) ++mtt->order; - mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); - if (mtt->first_seg == -1) + mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); + if (mtt->offset == -1) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_mtt_init); -void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { + u32 first_seg; + int seg_order; struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + seg_order = max_t(int, order - log_mtts_per_seg, 0); + first_seg = offset / (1 << log_mtts_per_seg); + + mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); + mlx4_table_put_range(dev, &mr_table->mtt_table, offset, + offset + (1 << order) - 1); +} + +static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) +{ + u64 in_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, offset); + set_param_h(&in_param, order); + err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to free mtt range at:" + "%d order:%d\n", offset, order); + return; + } + 
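
In mr.c the MTT bookkeeping switches from segments to individual entries: mlx4_mtt_init() now sizes mtt->order from npages alone, and __mlx4_alloc_mtt_range()/__mlx4_free_mtt_range() translate between an entry order and the buddy allocator's segment order with seg_order = max(order - log_mtts_per_seg, 0), so a small request still consumes a whole segment. A standalone sketch of that conversion, taking log_mtts_per_seg = 3 as an example value (not necessarily the module default):

    /* Standalone sketch (not from the patch): order/seg_order conversion used
     * when reserving MTT entries from the per-segment buddy allocator. */
    #include <stdio.h>

    #define LOG_MTTS_PER_SEG 3      /* 8 MTT entries per segment (example) */

    int main(void)
    {
        int order;                  /* log2 of the MTT entries requested */

        for (order = 0; order <= 6; order++) {
            int seg_order = order > LOG_MTTS_PER_SEG ?
                            order - LOG_MTTS_PER_SEG : 0;
            int reserved  = 1 << (seg_order + LOG_MTTS_PER_SEG);

            printf("order=%d -> seg_order=%d (buddy reserves %d entries"
                   " for %d requested)\n",
                   order, seg_order, reserved, 1 << order);
        }
        return 0;
    }
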
__mlx4_free_mtt_range(dev, offset, order); +} + +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ if (mtt->order < 0) return; - mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); - mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg, - mtt->first_seg + (1 << mtt->order) - 1); + mlx4_free_mtt_range(dev, mtt->offset, mtt->order); } EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { - return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; + return (u64) mtt->offset * dev->caps.mtt_entry_sz; } EXPORT_SYMBOL_GPL(mlx4_mtt_addr); @@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key) static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { - return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, - MLX4_CMD_TIME_CLASS_B); + return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index, + 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, - !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B); + !mailbox, MLX4_CMD_HW2SW_MPT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } -int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, - int npages, int page_shift, struct mlx4_mr *mr) +static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align, + u32 *base_mridx) { struct mlx4_priv *priv = mlx4_priv(dev); - u32 index; - int err; + u32 mridx; - index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); - if (index == -1) + mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align); + if (mridx == -1) return -ENOMEM; + *base_mridx = mridx; + return 0; + +} +EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range); + +static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt); +} +EXPORT_SYMBOL_GPL(mlx4_mr_release_range); + +static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, + u64 iova, u64 size, u32 access, int npages, + int page_shift, struct mlx4_mr *mr) +{ mr->iova = iova; mr->size = size; mr->pd = pd; mr->access = access; - mr->enabled = 0; - mr->key = hw_index_to_key(index); + mr->enabled = MLX4_MR_DISABLED; + mr->key = hw_index_to_key(mridx); + + return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); +} +EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved); + +static int mlx4_WRITE_MTT(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox, + int num_entries) +{ + return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} - err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); +int __mlx4_mr_reserve(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); +} + +static int mlx4_mr_reserve(struct mlx4_dev *dev) +{ + u64 out_param; + + if (mlx4_is_mfunc(dev)) { + if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + return -1; + return get_param_l(&out_param); + } + return __mlx4_mr_reserve(dev); +} + +void __mlx4_mr_release(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); +} + +static void 
mlx4_mr_release(struct mlx4_dev *dev, u32 index) +{ + u64 in_param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, index); + if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to release mr index:%d\n", + index); + return; + } + __mlx4_mr_release(dev, index); +} + +int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + return mlx4_table_get(dev, &mr_table->dmpt_table, index); +} + +static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) +{ + u64 param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(¶m, index); + return mlx4_cmd_imm(dev, param, ¶m, RES_MPT, RES_OP_MAP_ICM, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + } + return __mlx4_mr_alloc_icm(dev, index); +} + +void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + mlx4_table_put(dev, &mr_table->dmpt_table, index); +} + +static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) +{ + u64 in_param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, index); + if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to free icm of mr index:%d\n", + index); + return; + } + return __mlx4_mr_free_icm(dev, index); +} + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr) +{ + u32 index; + int err; + + index = mlx4_mr_reserve(dev); + if (index == -1) + return -ENOMEM; + + err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, + access, npages, page_shift, mr); if (err) - mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); + mlx4_mr_release(dev, index); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); -void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) +static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) { - struct mlx4_priv *priv = mlx4_priv(dev); int err; - if (mr->enabled) { + if (mr->enabled == MLX4_MR_EN_HW) { err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); if (err) - mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err); - } + mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); + mr->enabled = MLX4_MR_EN_SW; + } mlx4_mtt_cleanup(dev, &mr->mtt); - mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key)); +} +EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved); + +void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + mlx4_mr_free_reserved(dev, mr); + if (mr->enabled) + mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); + mlx4_mr_release(dev, key_to_hw_index(mr->key)); } EXPORT_SYMBOL_GPL(mlx4_mr_free); int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) { - struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; struct mlx4_cmd_mailbox *mailbox; struct mlx4_mpt_entry *mpt_entry; int err; - err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); + err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key)); if (err) return err; @@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) if (mr->mtt.order < 0) { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); - mpt_entry->mtt_seg = 0; + mpt_entry->mtt_addr = 0; } else { - mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); + mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, + &mr->mtt)); } if (mr->mtt.order >= 
0 && mr->mtt.page_shift == 0) { @@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | MLX4_MPT_PD_FLAG_RAE); - mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * - dev->caps.mtts_per_seg); + mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); } else { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); } @@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_cmd; } - - mr->enabled = 1; + mr->enabled = MLX4_MR_EN_HW; mlx4_free_cmd_mailbox(dev, mailbox); @@ -373,7 +546,7 @@ err_cmd: mlx4_free_cmd_mailbox(dev, mailbox); err_table: - mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); + mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_enable); @@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, __be64 *mtts; dma_addr_t dma_handle; int i; - int s = start_index * sizeof (u64); - /* All MTTs must fit in the same page */ - if (start_index / (PAGE_SIZE / sizeof (u64)) != - (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) - return -EINVAL; - - if (start_index & (dev->caps.mtts_per_seg - 1)) - return -EINVAL; + mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + + start_index, &dma_handle); - mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + - s / dev->caps.mtt_entry_sz, &dma_handle); if (!mtts) return -ENOMEM; @@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, return 0; } -int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - int start_index, int npages, u64 *page_list) +int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) { + int err = 0; int chunk; - int err; + int mtts_per_page; + int max_mtts_first_page; - if (mtt->order < 0) - return -EINVAL; + /* compute how may mtts fit in the first page */ + mtts_per_page = PAGE_SIZE / sizeof(u64); + max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) + % mtts_per_page; + + chunk = min_t(int, max_mtts_first_page, npages); while (npages > 0) { - chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); if (err) return err; - npages -= chunk; start_index += chunk; page_list += chunk; + + chunk = min_t(int, mtts_per_page, npages); } + return err; +} - return 0; +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + struct mlx4_cmd_mailbox *mailbox = NULL; + __be64 *inbox = NULL; + int chunk; + int err = 0; + int i; + + if (mtt->order < 0) + return -EINVAL; + + if (mlx4_is_mfunc(dev)) { + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + while (npages > 0) { + chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, + npages); + inbox[0] = cpu_to_be64(mtt->offset + start_index); + inbox[1] = 0; + for (i = 0; i < chunk; ++i) + inbox[i + 2] = cpu_to_be64(page_list[i] | + MLX4_MTT_FLAG_PRESENT); + err = mlx4_WRITE_MTT(dev, mailbox, chunk); + if (err) { + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + npages -= chunk; + start_index += chunk; + page_list += chunk; + } + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + return __mlx4_write_mtt(dev, mtt, 
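
__mlx4_write_mtt() drops the old requirement that an entire update fit in a single page of the MTT table: the first chunk is trimmed so it stops at the page boundary containing mtt->offset + start_index, and subsequent chunks are full pages (the mfunc path above similarly caps each mailbox at MLX4_MAILBOX_SIZE/8 - 2 entries to leave room for the leading offset and reserved words). A standalone sketch of the native-path chunking, assuming a 4096-byte page:

    /* Standalone sketch (not from the patch): chunk sequence produced by the
     * __mlx4_write_mtt() loop for an example offset and page count. */
    #include <stdio.h>

    #define MTTS_PER_PAGE 512       /* 4096-byte page / 8-byte MTT entry */

    int main(void)
    {
        int offset = 500;           /* mtt->offset, example value */
        int start_index = 0, npages = 1200;
        int chunk = MTTS_PER_PAGE - (offset + start_index) % MTTS_PER_PAGE;

        if (chunk > npages)
            chunk = npages;
        while (npages > 0) {
            printf("write %3d entries at table index %d\n",
                   chunk, offset + start_index);
            npages -= chunk;
            start_index += chunk;
            chunk = npages < MTTS_PER_PAGE ? npages : MTTS_PER_PAGE;
        }
        return 0;
    }
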
start_index, npages, page_list); } EXPORT_SYMBOL_GPL(mlx4_write_mtt); @@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); int mlx4_init_mr_table(struct mlx4_dev *dev) { - struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mr_table *mr_table = &priv->mr_table; int err; + if (!is_power_of_2(dev->caps.num_mpts)) + return -EINVAL; + + /* Nothing to do for slaves - all MR handling is forwarded + * to the master */ + if (mlx4_is_slave(dev)) + return 0; + err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, ~0, dev->caps.reserved_mrws, 0); if (err) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, - ilog2(dev->caps.num_mtt_segs)); + ilog2(dev->caps.num_mtts / + (1 << log_mtts_per_seg))); if (err) goto err_buddy; if (dev->caps.reserved_mtts) { - if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) { + priv->reserved_mtts = + mlx4_alloc_mtt_range(dev, + fls(dev->caps.reserved_mtts - 1)); + if (priv->reserved_mtts < 0) { mlx4_warn(dev, "MTT table of order %d is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; @@ -497,8 +723,14 @@ err_buddy: void mlx4_cleanup_mr_table(struct mlx4_dev *dev) { - struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mr_table *mr_table = &priv->mr_table; + if (mlx4_is_slave(dev)) + return; + if (priv->reserved_mtts >= 0) + mlx4_free_mtt_range(dev, priv->reserved_mtts, + fls(dev->caps.reserved_mtts - 1)); mlx4_buddy_cleanup(&mr_table->mtt_buddy); mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); } @@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); - u64 mtt_seg; + u64 mtt_offset; int err = -ENOMEM; if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) @@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, if (err) return err; - mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; + mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz; fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, - fmr->mr.mtt.first_seg, + fmr->mr.mtt.offset, &fmr->dma_handle); + if (!fmr->mtts) { err = -ENOMEM; goto err_free; @@ -619,6 +852,46 @@ err_free: } EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); +static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, + u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err = -ENOMEM; + + if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) + return -EINVAL; + + /* All MTTs must fit in the same page */ + if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) + return -EINVAL; + + fmr->page_shift = page_shift; + fmr->max_pages = max_pages; + fmr->max_maps = max_maps; + fmr->maps = 0; + + err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages, + page_shift, &fmr->mr); + if (err) + return err; + + fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, + fmr->mr.mtt.offset, + &fmr->dma_handle); + if (!fmr->mtts) { + err = -ENOMEM; + goto err_free; + } + + return 0; + +err_free: + mlx4_mr_free_reserved(dev, &fmr->mr); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved); + int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable); void 
mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) { + struct mlx4_cmd_mailbox *mailbox; + int err; + if (!fmr->maps) return; fmr->maps = 0; - *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" + " failed (%d)\n", err); + return; + } + + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(fmr->mr.key) & + (dev->caps.num_mpts - 1)); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) { + printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", + err); + return; + } + fmr->mr.enabled = MLX4_MR_EN_SW; } EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); @@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) if (fmr->maps) return -EBUSY; - fmr->mr.enabled = 0; mlx4_mr_free(dev, &fmr->mr); + fmr->mr.enabled = MLX4_MR_DISABLED; return 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_free); +static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + if (fmr->maps) + return -EBUSY; + + mlx4_mr_free_reserved(dev, &fmr->mr); + fmr->mr.enabled = MLX4_MR_DISABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved); + int mlx4_SYNC_TPT(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, + MLX4_CMD_WRAPPED); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 260ed259ce9..5c9a54df17a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include <linux/init.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/io-mapping.h> @@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); if (*pdn == -1) return -ENOMEM; - + if (mlx4_is_mfunc(dev)) + *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS; return 0; } EXPORT_SYMBOL_GPL(mlx4_pd_alloc); @@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, - (1 << 24) - 1, dev->caps.reserved_pds, 0); + (1 << NOT_MASKED_PD_BITS) - 1, + dev->caps.reserved_pds, 0); } void mlx4_cleanup_pd_table(struct mlx4_dev *dev) @@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) { + int offset; + uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); if (uar->index == -1) return -ENOMEM; - uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; + if (mlx4_is_slave(dev)) + offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / + dev->caps.uar_page_size); + else + offset = uar->index; + uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; uar->map = NULL; - return 0; } EXPORT_SYMBOL_GPL(mlx4_uar_alloc); @@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev) return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, dev->caps.num_uars, dev->caps.num_uars - 1, - max(128, dev->caps.reserved_uars), 0); + dev->caps.reserved_uars, 0); } void mlx4_cleanup_uar_table(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index d942aea4927..88b52e54752 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -70,41 
+70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) table->total = 0; } -static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, - __be64 *entries) -{ - struct mlx4_cmd_mailbox *mailbox; - u32 in_mod; - int err; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); - - in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} - -static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, - u64 mac, int *qpn, u8 reserve) +static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) { struct mlx4_qp qp; u8 gid[16] = {0}; int err; - if (reserve) { - err = mlx4_qp_reserve_range(dev, 1, 1, qpn); - if (err) { - mlx4_err(dev, "Failed to reserve qp for mac registration\n"); - return err; - } - } qp.qpn = *qpn; mac &= 0xffffffffffffULL; @@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, gid[5] = port; gid[7] = MLX4_UC_STEER << 1; - err = mlx4_qp_attach_common(dev, &qp, gid, 0, - MLX4_PROT_ETH, MLX4_UC_STEER); - if (err && reserve) - mlx4_qp_release_range(dev, *qpn, 1); + err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); + if (err) + mlx4_warn(dev, "Failed Attaching Unicast\n"); return err; } static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, - u64 mac, int qpn, u8 free) + u64 mac, int qpn) { struct mlx4_qp qp; u8 gid[16] = {0}; @@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, gid[5] = port; gid[7] = MLX4_UC_STEER << 1; - mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); - if (free) - mlx4_qp_release_range(dev, qpn, 1); + mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); +} + +static int validate_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, int index) +{ + int err = 0; + + if (index < 0 || index >= table->max || !table->entries[index]) { + mlx4_warn(dev, "No valid Mac entry for the given index\n"); + err = -EINVAL; + } + return err; +} + +static int find_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, u64 mac) +{ + int i; + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if ((mac & MLX4_MAC_MASK) == + (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) + return i; + } + /* Mac not found */ + return -EINVAL; } -int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) +int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - struct mlx4_mac_table *table = &info->mac_table; struct mlx4_mac_entry *entry; - int i, err = 0; - int free = -1; + int index = 0; + int err = 0; - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { - err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); - if (err) - return err; + mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n", + (unsigned long long) mac); + index = mlx4_register_mac(dev, port, mac); + if (index < 0) { + err = index; + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) mac); + return err; + } - entry = kmalloc(sizeof *entry, GFP_KERNEL); - if (!entry) { - mlx4_uc_steer_release(dev, port, mac, *qpn, 1); - return -ENOMEM; - } + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) { + *qpn = info->base_qpn + index; + return 0; + } + + err = mlx4_qp_reserve_range(dev, 1, 1, qpn); + mlx4_dbg(dev, "Reserved qp %d\n", 
*qpn); + if (err) { + mlx4_err(dev, "Failed to reserve qp for mac registration\n"); + goto qp_err; + } + + err = mlx4_uc_steer_add(dev, port, mac, qpn); + if (err) + goto steer_err; - entry->mac = mac; - err = radix_tree_insert(&info->mac_tree, *qpn, entry); - if (err) { + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto alloc_err; + } + entry->mac = mac; + err = radix_tree_insert(&info->mac_tree, *qpn, entry); + if (err) + goto insert_err; + return 0; + +insert_err: + kfree(entry); + +alloc_err: + mlx4_uc_steer_release(dev, port, mac, *qpn); + +steer_err: + mlx4_qp_release_range(dev, *qpn, 1); + +qp_err: + mlx4_unregister_mac(dev, port, mac); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_get_eth_qp); + +void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_entry *entry; + + mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n", + (unsigned long long) mac); + mlx4_unregister_mac(dev, port, mac); + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + entry = radix_tree_lookup(&info->mac_tree, qpn); + if (entry) { + mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx," + " qpn %d\n", port, + (unsigned long long) mac, qpn); + mlx4_uc_steer_release(dev, port, entry->mac, qpn); + mlx4_qp_release_range(dev, qpn, 1); + radix_tree_delete(&info->mac_tree, qpn); kfree(entry); - mlx4_uc_steer_release(dev, port, mac, *qpn, 1); - return err; } } +} +EXPORT_SYMBOL_GPL(mlx4_put_eth_qp); + +static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, + __be64 *entries) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; - mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); + + in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; + + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int i, err = 0; + int free = -1; + + mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", + (unsigned long long) mac, port); mutex_lock(&table->mutex); - for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { - if (free < 0 && !table->refs[i]) { + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (free < 0 && !table->entries[i]) { free = i; continue; } if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { - /* MAC already registered, increase references count */ - ++table->refs[i]; + /* MAC already registered, Must not have duplicates */ + err = -EEXIST; goto out; } } - if (free < 0) { - err = -ENOMEM; - goto out; - } - mlx4_dbg(dev, "Free MAC index is %d\n", free); if (table->total == table->max) { @@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) } /* Register new MAC */ - table->refs[free] = 1; table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); err = mlx4_set_port_mac_table(dev, port, table->entries); if (unlikely(err)) { - mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); - table->refs[free] = 0; + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) mac); table->entries[free] = 0; goto out; } - if (!(dev->caps.flags & 
MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - *qpn = info->base_qpn + free; + err = free; ++table->total; out: mutex_unlock(&table->mutex); return err; } -EXPORT_SYMBOL_GPL(mlx4_register_mac); +EXPORT_SYMBOL_GPL(__mlx4_register_mac); -static int validate_index(struct mlx4_dev *dev, - struct mlx4_mac_table *table, int index) +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - int err = 0; + u64 out_param; + int err; - if (index < 0 || index >= table->max || !table->entries[index]) { - mlx4_warn(dev, "No valid Mac entry for the given index\n"); - err = -EINVAL; - } - return err; -} + if (mlx4_is_mfunc(dev)) { + set_param_l(&out_param, port); + err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; -static int find_index(struct mlx4_dev *dev, - struct mlx4_mac_table *table, u64 mac) -{ - int i; - for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { - if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) - return i; + return get_param_l(&out_param); } - /* Mac not found */ - return -EINVAL; + return __mlx4_register_mac(dev, port, mac); } +EXPORT_SYMBOL_GPL(mlx4_register_mac); -void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) + +void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; - int index = qpn - info->base_qpn; - struct mlx4_mac_entry *entry; + int index; - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { - entry = radix_tree_lookup(&info->mac_tree, qpn); - if (entry) { - mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); - radix_tree_delete(&info->mac_tree, qpn); - index = find_index(dev, table, entry->mac); - kfree(entry); - } - } + index = find_index(dev, table, mac); mutex_lock(&table->mutex); if (validate_index(dev, table, index)) goto out; - /* Check whether this address has reference count */ - if (!(--table->refs[index])) { - table->entries[index] = 0; - mlx4_set_port_mac_table(dev, port, table->entries); - --table->total; - } + table->entries[index] = 0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; out: mutex_unlock(&table->mutex); } +EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); + +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&out_param, port); + err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + return; + } + __mlx4_unregister_mac(dev, port, mac); + return; +} EXPORT_SYMBOL_GPL(mlx4_unregister_mac); -int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) +int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; - int index = qpn - info->base_qpn; struct mlx4_mac_entry *entry; - int err; + int index = qpn - info->base_qpn; + int err = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { entry = radix_tree_lookup(&info->mac_tree, qpn); if (!entry) return -EINVAL; - index = find_index(dev, table, entry->mac); - mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); + mlx4_uc_steer_release(dev, port, entry->mac, qpn); + mlx4_unregister_mac(dev, port, entry->mac); entry->mac = new_mac; - err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); - if (err || index < 0) - 
return err; + mlx4_register_mac(dev, port, new_mac); + err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn); + return err; } + /* CX1 doesn't support multi-functions */ mutex_lock(&table->mutex); err = validate_index(dev, table, index); @@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra err = mlx4_set_port_mac_table(dev, port, table->entries); if (unlikely(err)) { - mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) new_mac); table->entries[index] = 0; } out: @@ -312,6 +387,7 @@ out: return err; } EXPORT_SYMBOL_GPL(mlx4_replace_mac); + static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, __be32 *entries) { @@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); @@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) } EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); -int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) +static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, + int *index) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; int i, err = 0; @@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) goto out; } - /* Register new MAC */ + /* Register new VLAN */ table->refs[free] = 1; table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); @@ -405,9 +482,27 @@ out: mutex_unlock(&table->mutex); return err; } + +int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&out_param, port); + err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *index = get_param_l(&out_param); + + return err; + } + return __mlx4_register_vlan(dev, port, vlan, index); +} EXPORT_SYMBOL_GPL(mlx4_register_vlan); -void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) +static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; @@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) out: mutex_unlock(&table->mutex); } + +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) +{ + u64 in_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, port); + err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (!err) + mlx4_warn(dev, "Failed freeing vlan at index:%d\n", + index); + + return; + } + __mlx4_unregister_vlan(dev, port, index); +} EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) @@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); if (!err) *caps = *(__be32 *) 
(outbuf + 84); mlx4_free_cmd_mailbox(dev, inmailbox); @@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4)); @@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) return err; } +static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, + u8 op_mod, struct mlx4_cmd_mailbox *inbox) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_port_info *port_info; + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + struct mlx4_set_port_rqp_calc_context *qpn_context; + struct mlx4_set_port_general_context *gen_context; + int reset_qkey_viols; + int port; + int is_eth; + u32 in_modifier; + u32 promisc; + u16 mtu, prev_mtu; + int err; + int i; + __be32 agg_cap_mask; + __be32 slave_cap_mask; + __be32 new_cap_mask; + + port = in_mod & 0xff; + in_modifier = in_mod >> 8; + is_eth = op_mod; + port_info = &priv->port[port]; + + /* Slaves cannot perform SET_PORT operations except changing MTU */ + if (is_eth) { + if (slave != dev->caps.function && + in_modifier != MLX4_SET_PORT_GENERAL) { + mlx4_warn(dev, "denying SET_PORT for slave:%d\n", + slave); + return -EINVAL; + } + switch (in_modifier) { + case MLX4_SET_PORT_RQP_CALC: + qpn_context = inbox->buf; + qpn_context->base_qpn = + cpu_to_be32(port_info->base_qpn); + qpn_context->n_mac = 0x7; + promisc = be32_to_cpu(qpn_context->promisc) >> + SET_PORT_PROMISC_SHIFT; + qpn_context->promisc = cpu_to_be32( + promisc << SET_PORT_PROMISC_SHIFT | + port_info->base_qpn); + promisc = be32_to_cpu(qpn_context->mcast) >> + SET_PORT_MC_PROMISC_SHIFT; + qpn_context->mcast = cpu_to_be32( + promisc << SET_PORT_MC_PROMISC_SHIFT | + port_info->base_qpn); + break; + case MLX4_SET_PORT_GENERAL: + gen_context = inbox->buf; + /* Mtu is configured as the max MTU among all the + * the functions on the port. */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == + master->max_mtu[port]) { + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) { + master->max_mtu[port] = + max(master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + } + + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); + break; + } + return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + } + + /* For IB, we only consider: + * - The capability mask, which is set to the aggregate of all + * slave function capabilities + * - The QKey violatin counter - reset according to each request. 
+ */ + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; + new_cap_mask = ((__be32 *) inbox->buf)[2]; + } else { + reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; + new_cap_mask = ((__be32 *) inbox->buf)[1]; + } + + agg_cap_mask = 0; + slave_cap_mask = + priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; + priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; + for (i = 0; i < dev->num_slaves; i++) + agg_cap_mask |= + priv->mfunc.master.slave_state[i].ib_cap_mask[port]; + + /* only clear mailbox for guests. Master may be setting + * MTU or PKEY table size + */ + if (slave != dev->caps.function) + memset(inbox->buf, 0, 256); + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + *(u8 *) inbox->buf = !!reset_qkey_viols << 6; + ((__be32 *) inbox->buf)[2] = agg_cap_mask; + } else { + ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; + ((__be32 *) inbox->buf)[1] = agg_cap_mask; + } + + err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = + slave_cap_mask; + return err; +} + +int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return mlx4_common_set_port(dev, slave, vhcr->in_modifier, + vhcr->op_modifier, inbox); +} + int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) { struct mlx4_cmd_mailbox *mailbox; @@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; } + +int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, + u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + int err; + u32 in_mod; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof *context); + + context->flags = SET_PORT_GEN_ALL_VALID; + context->mtu = cpu_to_be16(mtu); + context->pptx = (pptx * (!pfctx)) << 7; + context->pfctx = pfctx; + context->pprx = (pprx * (!pfcrx)) << 7; + context->pfcrx = pfcrx; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_general); + +int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, + u8 promisc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_rqp_calc_context *context; + int err; + u32 in_mod; + u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? 
+ MCAST_DIRECT : MCAST_DEFAULT; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof *context); + + context->base_qpn = cpu_to_be32(base_qpn); + context->n_mac = dev->caps.log_num_macs; + context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | + base_qpn); + context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | + base_qpn); + context->intra_no_vlan = 0; + context->no_vlan = MLX4_NO_VLAN_IDX; + context->intra_vlan_miss = 0; + context->vlan_miss = MLX4_VLAN_MISS_IDX; + + in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); + +int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + + return err; +} + +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, + u64 mac, u64 clear, u8 mode) +{ + return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, + MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} +EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR); + +int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + + return err; +} + +int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, + u32 in_mod, struct mlx4_cmd_mailbox *outbox) +{ + return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); +} + +int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return mlx4_common_dump_eth_stats(dev, slave, + vhcr->in_modifier, outbox); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index b967647d0c7..66f91ca7a7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; - profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; - profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; + profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz; + profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); profile[MLX4_RES_QP].num = request->num_qp; profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; @@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->cmpt_base = profile[i].start; break; case MLX4_RES_MTT: - dev->caps.num_mtt_segs = profile[i].num; + dev->caps.num_mtts = profile[i].num; priv->mr_table.mtt_base = profile[i].start; init_hca->mtt_base = profile[i].start; break; @@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, dev->caps.num_mgms = profile[i].num >> 1; dev->caps.num_amgms = profile[i].num >> 1; init_hca->mc_base = profile[i].start; - 
init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE); + init_hca->log_mc_entry_sz = + ilog2(mlx4_get_mgm_entry_size(dev)); init_hca->log_mc_table_sz = profile[i].log_num; init_hca->log_mc_hash_sz = profile[i].log_num - 1; break; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 15f870cb259..6b03ac8b900 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -35,6 +35,8 @@ #include <linux/gfp.h> #include <linux/export.h> +#include <linux/init.h> + #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> @@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) spin_unlock(&qp_table->lock); if (!qp) { - mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); + mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); return; } @@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) complete(&qp->free); } -int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, - struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, - int sqd_event, struct mlx4_qp *qp) +static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + return qp->qpn >= dev->caps.sqp_start && + qp->qpn <= dev->caps.sqp_start + 1; +} + +static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, + enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp, int native) { static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { [MLX4_QP_STATE_RST] = { @@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, } }; + struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; int ret = 0; + u8 port; if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || !op[cur_state][new_state]) return -EINVAL; - if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) - return mlx4_cmd(dev, 0, qp->qpn, 2, - MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A); + if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) { + ret = mlx4_cmd(dev, 0, qp->qpn, 2, + MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); + if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && + cur_state != MLX4_QP_STATE_RST && + is_qp0(dev, qp)) { + port = (qp->qpn & 1) + 1; + priv->mfunc.master.qp0_state[port].qp0_active = 0; + } + return ret; + } mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) @@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; } + port = ((context->pri_path.sched_queue >> 6) & 1) + 1; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + context->pri_path.sched_queue = (context->pri_path.sched_queue & + 0xc3); + *(__be32 *) mailbox->buf = cpu_to_be32(optpar); memcpy(mailbox->buf + 8, context, sizeof *context); ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); - ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), + ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function, + qp->qpn | (!!sqd_event << 31), new_state == MLX4_QP_STATE_RST ? 
2 : 0, - op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C); + op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); mlx4_free_cmd_mailbox(dev, mailbox); return ret; } + +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, + enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp) +{ + return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, + optpar, sqd_event, qp, 0); +} EXPORT_SYMBOL_GPL(mlx4_qp_modify); -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) +int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; - int qpn; - qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); - if (qpn == -1) + *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); + if (*base == -1) return -ENOMEM; - *base = qpn; return 0; } + +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) +{ + u64 in_param; + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, cnt); + set_param_h(&in_param, align); + err = mlx4_cmd_imm(dev, in_param, &out_param, + RES_QP, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + + *base = get_param_l(&out_param); + return 0; + } + return __mlx4_qp_reserve_range(dev, cnt, align, base); +} EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); -void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; - if (base_qpn < dev->caps.sqp_start + 8) - return; + if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) + return; mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); } + +void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +{ + u64 in_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, base_qpn); + set_param_h(&in_param, cnt); + err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) { + mlx4_warn(dev, "Failed to release qp range" + " base:%d cnt:%d\n", base_qpn, cnt); + } + } else + __mlx4_qp_release_range(dev, base_qpn, cnt); +} EXPORT_SYMBOL_GPL(mlx4_qp_release_range); -int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; - if (!qpn) - return -EINVAL; - - qp->qpn = qpn; - - err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); + err = mlx4_table_get(dev, &qp_table->qp_table, qpn); if (err) goto err_out; - err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn); + err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); if (err) goto err_put_qp; - err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn); + err = mlx4_table_get(dev, &qp_table->altc_table, qpn); if (err) goto err_put_auxc; - err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn); + err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); if (err) goto err_put_altc; - err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn); + err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); if (err) goto err_put_rdmarc; - spin_lock_irq(&qp_table->lock); - err = 
radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp); - spin_unlock_irq(&qp_table->lock); - if (err) - goto err_put_cmpt; - - atomic_set(&qp->refcount, 1); - init_completion(&qp->free); - return 0; -err_put_cmpt: - mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); - err_put_rdmarc: - mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); err_put_altc: - mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->altc_table, qpn); err_put_auxc: - mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qpn); err_put_qp: - mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); + mlx4_table_put(dev, &qp_table->qp_table, qpn); err_out: return err; } + +static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) +{ + u64 param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(¶m, qpn); + return mlx4_cmd_imm(dev, param, ¶m, RES_QP, RES_OP_MAP_ICM, + MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + } + return __mlx4_qp_alloc_icm(dev, qpn); +} + +void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + mlx4_table_put(dev, &qp_table->cmpt_table, qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); + mlx4_table_put(dev, &qp_table->altc_table, qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qpn); + mlx4_table_put(dev, &qp_table->qp_table, qpn); +} + +static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) +{ + u64 in_param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, qpn); + if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); + } else + __mlx4_qp_free_icm(dev, qpn); +} + +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + int err; + + if (!qpn) + return -EINVAL; + + qp->qpn = qpn; + + err = mlx4_qp_alloc_icm(dev, qpn); + if (err) + return err; + + spin_lock_irq(&qp_table->lock); + err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & + (dev->caps.num_qps - 1), qp); + spin_unlock_irq(&qp_table->lock); + if (err) + goto err_icm; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + + return 0; + +err_icm: + mlx4_qp_free_icm(dev, qpn); + return err; +} + EXPORT_SYMBOL_GPL(mlx4_qp_alloc); void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) @@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) { - struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; - if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); wait_for_completion(&qp->free); - mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); - mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); - mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); - mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); - mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); + mlx4_qp_free_icm(dev, qp->qpn); } EXPORT_SYMBOL_GPL(mlx4_qp_free); static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) { return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_init_qp_table(struct mlx4_dev *dev) @@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) 
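The qp.c changes above follow the same split used throughout this series: the double-underscore helpers (__mlx4_qp_reserve_range, __mlx4_qp_alloc_icm, __mlx4_qp_free_icm, ...) touch the bitmaps and ICM tables directly, while the exported entry points check mlx4_is_mfunc() and, on a slave, forward the request to the master as an ALLOC_RES/FREE_RES command with the arguments packed into the 64-bit immediate via set_param_l()/set_param_h(). The sketch below is a simplified userspace model of that packing and dispatch, not the driver code: the helper names mirror the kernel ones, but fake_reserve_range(), reserve_range_wrapper() and the rest of the scaffolding are invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Model of the mlx4 immediate-parameter packing: two 32-bit values
 * (e.g. QP count and alignment) travel in one 64-bit "in_param". */
static void set_param_l(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | val;                    /* low 32 bits  */
}

static void set_param_h(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0x00000000ffffffffULL) | ((uint64_t)val << 32);  /* high 32 bits */
}

static uint32_t get_param_l(const uint64_t *arg)
{
	return (uint32_t)(*arg & 0xffffffff);
}

/* Stand-in for the master-side allocator (__mlx4_qp_reserve_range). */
static int fake_reserve_range(int cnt, int align, int *base)
{
	static int next = 64;           /* pretend QP numbers start at 64 */
	(void)align;
	*base = next;
	next += cnt;
	return 0;
}

/* Slave-side wrapper: pack cnt/align, "send" the command, unpack base. */
static int reserve_range_wrapper(int cnt, int align, int *base)
{
	uint64_t in_param = 0, out_param = 0;
	int b, err;

	set_param_l(&in_param, cnt);
	set_param_h(&in_param, align);

	/* In the driver this is mlx4_cmd_imm(..., RES_QP, RES_OP_RESERVE, ...);
	 * here the allocator is called directly. */
	err = fake_reserve_range(get_param_l(&in_param),
				 (int)(in_param >> 32), &b);
	if (err)
		return err;

	set_param_l(&out_param, (uint32_t)b);
	*base = (int)get_param_l(&out_param);
	return 0;
}

int main(void)
{
	int base;

	if (!reserve_range_wrapper(8, 8, &base))
		printf("reserved 8 QPs starting at %d\n", base);
	return 0;
}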
spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; /* * We reserve 2 extra QPs per port for the special QPs. The @@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) void mlx4_cleanup_qp_table(struct mlx4_dev *dev) { + if (mlx4_is_slave(dev)) + return; + mlx4_CONF_SPECIAL_QP(dev, 0); mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); } @@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, return PTR_ERR(mailbox); err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, - MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); if (!err) memcpy(context, mailbox->buf + 8, sizeof *context); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c new file mode 100644 index 00000000000..ed20751a057 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -0,0 +1,3104 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> + +#include "mlx4.h" +#include "fw.h" + +#define MLX4_MAC_VALID (1ull << 63) +#define MLX4_MAC_MASK 0x7fffffffffffffffULL +#define ETH_ALEN 6 + +struct mac_res { + struct list_head list; + u64 mac; + u8 port; +}; + +struct res_common { + struct list_head list; + u32 res_id; + int owner; + int state; + int from_state; + int to_state; + int removing; +}; + +enum { + RES_ANY_BUSY = 1 +}; + +struct res_gid { + struct list_head list; + u8 gid[16]; + enum mlx4_protocol prot; +}; + +enum res_qp_states { + RES_QP_BUSY = RES_ANY_BUSY, + + /* QP number was allocated */ + RES_QP_RESERVED, + + /* ICM memory for QP context was mapped */ + RES_QP_MAPPED, + + /* QP is in hw ownership */ + RES_QP_HW +}; + +static inline const char *qp_states_str(enum res_qp_states state) +{ + switch (state) { + case RES_QP_BUSY: return "RES_QP_BUSY"; + case RES_QP_RESERVED: return "RES_QP_RESERVED"; + case RES_QP_MAPPED: return "RES_QP_MAPPED"; + case RES_QP_HW: return "RES_QP_HW"; + default: return "Unknown"; + } +} + +struct res_qp { + struct res_common com; + struct res_mtt *mtt; + struct res_cq *rcq; + struct res_cq *scq; + struct res_srq *srq; + struct list_head mcg_list; + spinlock_t mcg_spl; + int local_qpn; +}; + +enum res_mtt_states { + RES_MTT_BUSY = RES_ANY_BUSY, + RES_MTT_ALLOCATED, +}; + +static inline const char *mtt_states_str(enum res_mtt_states state) +{ + switch (state) { + case RES_MTT_BUSY: return "RES_MTT_BUSY"; + case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED"; + default: return "Unknown"; + } +} + +struct res_mtt { + struct res_common com; + int order; + atomic_t ref_count; +}; + +enum res_mpt_states { + RES_MPT_BUSY = RES_ANY_BUSY, + RES_MPT_RESERVED, + RES_MPT_MAPPED, + RES_MPT_HW, +}; + +struct res_mpt { + struct res_common com; + struct res_mtt *mtt; + int key; +}; + +enum res_eq_states { + RES_EQ_BUSY = RES_ANY_BUSY, + RES_EQ_RESERVED, + RES_EQ_HW, +}; + +struct res_eq { + struct res_common com; + struct res_mtt *mtt; +}; + +enum res_cq_states { + RES_CQ_BUSY = RES_ANY_BUSY, + RES_CQ_ALLOCATED, + RES_CQ_HW, +}; + +struct res_cq { + struct res_common com; + struct res_mtt *mtt; + atomic_t ref_count; +}; + +enum res_srq_states { + RES_SRQ_BUSY = RES_ANY_BUSY, + RES_SRQ_ALLOCATED, + RES_SRQ_HW, +}; + +static inline const char *srq_states_str(enum res_srq_states state) +{ + switch (state) { + case RES_SRQ_BUSY: return "RES_SRQ_BUSY"; + case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED"; + case RES_SRQ_HW: return "RES_SRQ_HW"; + default: return "Unknown"; + } +} + +struct res_srq { + struct res_common com; + struct res_mtt *mtt; + struct res_cq *cq; + atomic_t ref_count; +}; + +enum res_counter_states { + RES_COUNTER_BUSY = RES_ANY_BUSY, + RES_COUNTER_ALLOCATED, +}; + +static inline const char *counter_states_str(enum res_counter_states state) +{ + switch (state) { + case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY"; + case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED"; + default: return "Unknown"; + } +} + +struct res_counter { + struct res_common com; + int port; +}; + +/* For Debug uses */ +static const char *ResourceType(enum mlx4_resource rt) +{ + switch (rt) { + case RES_QP: return "RES_QP"; + case RES_CQ: return "RES_CQ"; + case RES_SRQ: return "RES_SRQ"; + case RES_MPT: return "RES_MPT"; + case RES_MTT: return "RES_MTT"; + case RES_MAC: return "RES_MAC"; + case RES_EQ: 
return "RES_EQ"; + case RES_COUNTER: return "RES_COUNTER"; + default: return "Unknown resource type !!!"; + }; +} + +int mlx4_init_resource_tracker(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + int t; + + priv->mfunc.master.res_tracker.slave_list = + kzalloc(dev->num_slaves * sizeof(struct slave_list), + GFP_KERNEL); + if (!priv->mfunc.master.res_tracker.slave_list) + return -ENOMEM; + + for (i = 0 ; i < dev->num_slaves; i++) { + for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) + INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. + slave_list[i].res_list[t]); + mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + } + + mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", + dev->num_slaves); + for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) + INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i], + GFP_ATOMIC|__GFP_NOWARN); + + spin_lock_init(&priv->mfunc.master.res_tracker.lock); + return 0 ; +} + +void mlx4_free_resource_tracker(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + if (priv->mfunc.master.res_tracker.slave_list) { + for (i = 0 ; i < dev->num_slaves; i++) + mlx4_delete_all_resources_for_slave(dev, i); + + kfree(priv->mfunc.master.res_tracker.slave_list); + } +} + +static void update_ud_gid(struct mlx4_dev *dev, + struct mlx4_qp_context *qp_ctx, u8 slave) +{ + u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + + if (MLX4_QP_ST_UD == ts) + qp_ctx->pri_path.mgid_index = 0x80 | slave; + + mlx4_dbg(dev, "slave %d, new gid index: 0x%x ", + slave, qp_ctx->pri_path.mgid_index); +} + +static int mpt_mask(struct mlx4_dev *dev) +{ + return dev->caps.num_mpts - 1; +} + +static void *find_res(struct mlx4_dev *dev, int res_id, + enum mlx4_resource type) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type], + res_id); +} + +static int get_res(struct mlx4_dev *dev, int slave, int res_id, + enum mlx4_resource type, + void *res) +{ + struct res_common *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, res_id, type); + if (!r) { + err = -ENONET; + goto exit; + } + + if (r->state == RES_ANY_BUSY) { + err = -EBUSY; + goto exit; + } + + if (r->owner != slave) { + err = -EPERM; + goto exit; + } + + r->from_state = r->state; + r->state = RES_ANY_BUSY; + mlx4_dbg(dev, "res %s id 0x%x to busy\n", + ResourceType(type), r->res_id); + + if (res) + *((struct res_common **)res) = r; + +exit: + spin_unlock_irq(mlx4_tlock(dev)); + return err; +} + +int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, + enum mlx4_resource type, + int res_id, int *slave) +{ + + struct res_common *r; + int err = -ENOENT; + int id = res_id; + + if (type == RES_QP) + id &= 0x7fffff; + spin_lock(mlx4_tlock(dev)); + + r = find_res(dev, id, type); + if (r) { + *slave = r->owner; + err = 0; + } + spin_unlock(mlx4_tlock(dev)); + + return err; +} + +static void put_res(struct mlx4_dev *dev, int slave, int res_id, + enum mlx4_resource type) +{ + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, res_id, type); + if (r) + r->state = r->from_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static struct res_common *alloc_qp_tr(int id) +{ + struct res_qp *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_QP_RESERVED; + INIT_LIST_HEAD(&ret->mcg_list); + spin_lock_init(&ret->mcg_spl); + + return &ret->com; +} + +static struct res_common *alloc_mtt_tr(int 
id, int order) +{ + struct res_mtt *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->order = order; + ret->com.state = RES_MTT_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_mpt_tr(int id, int key) +{ + struct res_mpt *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_MPT_RESERVED; + ret->key = key; + + return &ret->com; +} + +static struct res_common *alloc_eq_tr(int id) +{ + struct res_eq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_EQ_RESERVED; + + return &ret->com; +} + +static struct res_common *alloc_cq_tr(int id) +{ + struct res_cq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_CQ_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_srq_tr(int id) +{ + struct res_srq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_SRQ_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_counter_tr(int id) +{ + struct res_counter *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_COUNTER_ALLOCATED; + + return &ret->com; +} + +static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, + int extra) +{ + struct res_common *ret; + + switch (type) { + case RES_QP: + ret = alloc_qp_tr(id); + break; + case RES_MPT: + ret = alloc_mpt_tr(id, extra); + break; + case RES_MTT: + ret = alloc_mtt_tr(id, extra); + break; + case RES_EQ: + ret = alloc_eq_tr(id); + break; + case RES_CQ: + ret = alloc_cq_tr(id); + break; + case RES_SRQ: + ret = alloc_srq_tr(id); + break; + case RES_MAC: + printk(KERN_ERR "implementation missing\n"); + return NULL; + case RES_COUNTER: + ret = alloc_counter_tr(id); + break; + + default: + return NULL; + } + if (ret) + ret->owner = slave; + + return ret; +} + +static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, + enum mlx4_resource type, int extra) +{ + int i; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + struct res_common **res_arr; + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct radix_tree_root *root = &tracker->res_tree[type]; + + res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); + if (!res_arr) + return -ENOMEM; + + for (i = 0; i < count; ++i) { + res_arr[i] = alloc_tr(base + i, type, slave, extra); + if (!res_arr[i]) { + for (--i; i >= 0; --i) + kfree(res_arr[i]); + + kfree(res_arr); + return -ENOMEM; + } + } + + spin_lock_irq(mlx4_tlock(dev)); + for (i = 0; i < count; ++i) { + if (find_res(dev, base + i, type)) { + err = -EEXIST; + goto undo; + } + err = radix_tree_insert(root, base + i, res_arr[i]); + if (err) + goto undo; + list_add_tail(&res_arr[i]->list, + &tracker->slave_list[slave].res_list[type]); + } + spin_unlock_irq(mlx4_tlock(dev)); + kfree(res_arr); + + return 0; + +undo: + for (--i; i >= base; --i) + radix_tree_delete(&tracker->res_tree[type], i); + + spin_unlock_irq(mlx4_tlock(dev)); + + for (i = 0; i < count; ++i) + kfree(res_arr[i]); + + kfree(res_arr); + + return err; +} + +static int remove_qp_ok(struct res_qp *res) +{ + if (res->com.state == RES_QP_BUSY) + return 
-EBUSY; + else if (res->com.state != RES_QP_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_mtt_ok(struct res_mtt *res, int order) +{ + if (res->com.state == RES_MTT_BUSY || + atomic_read(&res->ref_count)) { + printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", + __func__, __LINE__, + mtt_states_str(res->com.state), + atomic_read(&res->ref_count)); + return -EBUSY; + } else if (res->com.state != RES_MTT_ALLOCATED) + return -EPERM; + else if (res->order != order) + return -EINVAL; + + return 0; +} + +static int remove_mpt_ok(struct res_mpt *res) +{ + if (res->com.state == RES_MPT_BUSY) + return -EBUSY; + else if (res->com.state != RES_MPT_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_eq_ok(struct res_eq *res) +{ + if (res->com.state == RES_MPT_BUSY) + return -EBUSY; + else if (res->com.state != RES_MPT_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_counter_ok(struct res_counter *res) +{ + if (res->com.state == RES_COUNTER_BUSY) + return -EBUSY; + else if (res->com.state != RES_COUNTER_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_cq_ok(struct res_cq *res) +{ + if (res->com.state == RES_CQ_BUSY) + return -EBUSY; + else if (res->com.state != RES_CQ_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_srq_ok(struct res_srq *res) +{ + if (res->com.state == RES_SRQ_BUSY) + return -EBUSY; + else if (res->com.state != RES_SRQ_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) +{ + switch (type) { + case RES_QP: + return remove_qp_ok((struct res_qp *)res); + case RES_CQ: + return remove_cq_ok((struct res_cq *)res); + case RES_SRQ: + return remove_srq_ok((struct res_srq *)res); + case RES_MPT: + return remove_mpt_ok((struct res_mpt *)res); + case RES_MTT: + return remove_mtt_ok((struct res_mtt *)res, extra); + case RES_MAC: + return -ENOSYS; + case RES_EQ: + return remove_eq_ok((struct res_eq *)res); + case RES_COUNTER: + return remove_counter_ok((struct res_counter *)res); + default: + return -EINVAL; + } +} + +static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, + enum mlx4_resource type, int extra) +{ + int i; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + for (i = base; i < base + count; ++i) { + r = radix_tree_lookup(&tracker->res_tree[type], i); + if (!r) { + err = -ENOENT; + goto out; + } + if (r->owner != slave) { + err = -EPERM; + goto out; + } + err = remove_ok(r, type, extra); + if (err) + goto out; + } + + for (i = base; i < base + count; ++i) { + r = radix_tree_lookup(&tracker->res_tree[type], i); + radix_tree_delete(&tracker->res_tree[type], i); + list_del(&r->list); + kfree(r); + } + err = 0; + +out: + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, + enum res_qp_states state, struct res_qp **qp, + int alloc) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_qp *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_QP_BUSY: + mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n", + __func__, r->com.res_id); + err = -EBUSY; + break; + 
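The qp_res_start_move_to() switch being added here (and its mpt/eq/cq/srq siblings below) all implement the same three-phase protocol: validate that the requested transition is legal from the current state, park the resource in the shared *_BUSY state while the firmware command runs, then either res_end_move() commits to_state or res_abort_move() restores from_state when the command fails. The fragment below is a stripped-down, self-contained model of that flow using invented names (res_state, try_start_move, ...); it deliberately omits the driver's spinlock, radix-tree lookup and owner checks.

#include <stdio.h>

enum res_state { RES_BUSY, RES_RESERVED, RES_MAPPED, RES_HW };

struct res {
	enum res_state state, from_state, to_state;
};

/* Phase 1: validate the transition and park the resource in BUSY. */
static int try_start_move(struct res *r, enum res_state to)
{
	/* Only the legal edges of the QP-like state machine are allowed. */
	int ok = (to == RES_MAPPED   && (r->state == RES_RESERVED ||
					 r->state == RES_HW)) ||
		 (to == RES_HW       &&  r->state == RES_MAPPED) ||
		 (to == RES_RESERVED &&  r->state == RES_MAPPED);

	if (!ok)
		return -1;
	r->from_state = r->state;
	r->to_state = to;
	r->state = RES_BUSY;            /* other callers now see it as busy */
	return 0;
}

/* Phase 2a: the firmware command succeeded, commit the new state. */
static void end_move(struct res *r)   { r->state = r->to_state; }

/* Phase 2b: the command failed, roll back to where we started. */
static void abort_move(struct res *r) { r->state = r->from_state; }

int main(void)
{
	struct res qp = { .state = RES_RESERVED };

	if (try_start_move(&qp, RES_MAPPED) == 0)
		end_move(&qp);          /* e.g. ICM mapping succeeded */

	if (try_start_move(&qp, RES_HW) == 0)
		abort_move(&qp);        /* e.g. SW2HW command failed  */

	printf("final state: %d (expect %d = RES_MAPPED)\n", qp.state, RES_MAPPED);
	return 0;
}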
+ case RES_QP_RESERVED: + if (r->com.state == RES_QP_MAPPED && !alloc) + break; + + mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id); + err = -EINVAL; + break; + + case RES_QP_MAPPED: + if ((r->com.state == RES_QP_RESERVED && alloc) || + r->com.state == RES_QP_HW) + break; + else { + mlx4_dbg(dev, "failed RES_QP, 0x%x\n", + r->com.res_id); + err = -EINVAL; + } + + break; + + case RES_QP_HW: + if (r->com.state != RES_QP_MAPPED) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_QP_BUSY; + if (qp) + *qp = (struct res_qp *)r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_mpt_states state, struct res_mpt **mpt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_mpt *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_MPT_BUSY: + err = -EINVAL; + break; + + case RES_MPT_RESERVED: + if (r->com.state != RES_MPT_MAPPED) + err = -EINVAL; + break; + + case RES_MPT_MAPPED: + if (r->com.state != RES_MPT_RESERVED && + r->com.state != RES_MPT_HW) + err = -EINVAL; + break; + + case RES_MPT_HW: + if (r->com.state != RES_MPT_MAPPED) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_MPT_BUSY; + if (mpt) + *mpt = (struct res_mpt *)r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_eq_states state, struct res_eq **eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_eq *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_EQ_BUSY: + err = -EINVAL; + break; + + case RES_EQ_RESERVED: + if (r->com.state != RES_EQ_HW) + err = -EINVAL; + break; + + case RES_EQ_HW: + if (r->com.state != RES_EQ_RESERVED) + err = -EINVAL; + break; + + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_EQ_BUSY; + if (eq) + *eq = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, + enum res_cq_states state, struct res_cq **cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_cq *r; + int err; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_CQ_BUSY: + err = -EBUSY; + break; + + case RES_CQ_ALLOCATED: + if (r->com.state != RES_CQ_HW) + err = -EINVAL; + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + else + err = 0; + break; + + case RES_CQ_HW: + if (r->com.state != RES_CQ_ALLOCATED) + err = -EINVAL; + else + err = 0; + break; + + default: + err = -EINVAL; + } + + if (!err) { + 
r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_CQ_BUSY; + if (cq) + *cq = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_cq_states state, struct res_srq **srq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_srq *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_SRQ_BUSY: + err = -EINVAL; + break; + + case RES_SRQ_ALLOCATED: + if (r->com.state != RES_SRQ_HW) + err = -EINVAL; + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + break; + + case RES_SRQ_HW: + if (r->com.state != RES_SRQ_ALLOCATED) + err = -EINVAL; + break; + + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_SRQ_BUSY; + if (srq) + *srq = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static void res_abort_move(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[type], id); + if (r && (r->owner == slave)) + r->state = r->from_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void res_end_move(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = radix_tree_lookup(&tracker->res_tree[type], id); + if (r && (r->owner == slave)) + r->state = r->to_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) +{ + return mlx4_is_qp_reserved(dev, qpn); +} + +static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err; + int count; + int align; + int base; + int qpn; + + switch (op) { + case RES_OP_RESERVE: + count = get_param_l(&in_param); + align = get_param_h(&in_param); + err = __mlx4_qp_reserve_range(dev, count, align, &base); + if (err) + return err; + + err = add_res_range(dev, slave, base, count, RES_QP, 0); + if (err) { + __mlx4_qp_release_range(dev, base, count); + return err; + } + set_param_l(out_param, base); + break; + case RES_OP_MAP_ICM: + qpn = get_param_l(&in_param) & 0x7fffff; + if (valid_reserved(dev, slave, qpn)) { + err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); + if (err) + return err; + } + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, + NULL, 1); + if (err) + return err; + + if (!valid_reserved(dev, slave, qpn)) { + err = __mlx4_qp_alloc_icm(dev, qpn); + if (err) { + res_abort_move(dev, slave, RES_QP, qpn); + return err; + } + } + + res_end_move(dev, slave, RES_QP, qpn); + break; + + default: + err = -EINVAL; + break; + } + return err; +} + +static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int base; + int order; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + order = get_param_l(&in_param); + base = __mlx4_alloc_mtt_range(dev, order); + 
if (base == -1) + return -ENOMEM; + + err = add_res_range(dev, slave, base, 1, RES_MTT, order); + if (err) + __mlx4_free_mtt_range(dev, base, order); + else + set_param_l(out_param, base); + + return err; +} + +static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int index; + int id; + struct res_mpt *mpt; + + switch (op) { + case RES_OP_RESERVE: + index = __mlx4_mr_reserve(dev); + if (index == -1) + break; + id = index & mpt_mask(dev); + + err = add_res_range(dev, slave, id, 1, RES_MPT, index); + if (err) { + __mlx4_mr_release(dev, index); + break; + } + set_param_l(out_param, index); + break; + case RES_OP_MAP_ICM: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_MAPPED, &mpt); + if (err) + return err; + + err = __mlx4_mr_alloc_icm(dev, mpt->key); + if (err) { + res_abort_move(dev, slave, RES_MPT, id); + return err; + } + + res_end_move(dev, slave, RES_MPT, id); + break; + } + return err; +} + +static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int cqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + err = __mlx4_cq_alloc_icm(dev, &cqn); + if (err) + break; + + err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); + if (err) { + __mlx4_cq_free_icm(dev, cqn); + break; + } + + set_param_l(out_param, cqn); + break; + + default: + err = -EINVAL; + } + + return err; +} + +static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int srqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + err = __mlx4_srq_alloc_icm(dev, &srqn); + if (err) + break; + + err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); + if (err) { + __mlx4_srq_free_icm(dev, srqn); + break; + } + + set_param_l(out_param, srqn); + break; + + default: + err = -EINVAL; + } + + return err; +} + +static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct mac_res *res; + + res = kzalloc(sizeof *res, GFP_KERNEL); + if (!res) + return -ENOMEM; + res->mac = mac; + res->port = (u8) port; + list_add_tail(&res->list, + &tracker->slave_list[slave].res_list[RES_MAC]); + return 0; +} + +static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + list_del(&res->list); + kfree(res); + break; + } + } +} + +static void rem_slave_macs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + list_del(&res->list); + __mlx4_unregister_mac(dev, res->port, res->mac); + kfree(res); + } +} + +static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int port; + u64 mac; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + port 
= get_param_l(out_param); + mac = in_param; + + err = __mlx4_register_mac(dev, port, mac); + if (err >= 0) { + set_param_l(out_param, err); + err = 0; + } + + if (!err) { + err = mac_add_to_slave(dev, slave, mac, port); + if (err) + __mlx4_unregister_mac(dev, port, mac); + } + return err; +} + +static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + return 0; +} + +int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int alop = vhcr->op_modifier; + + switch (vhcr->in_modifier) { + case RES_QP: + err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MTT: + err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MPT: + err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_CQ: + err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_SRQ: + err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MAC: + err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_VLAN: + err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param) +{ + int err; + int count; + int base; + int qpn; + + switch (op) { + case RES_OP_RESERVE: + base = get_param_l(&in_param) & 0x7fffff; + count = get_param_h(&in_param); + err = rem_res_range(dev, slave, base, count, RES_QP, 0); + if (err) + break; + __mlx4_qp_release_range(dev, base, count); + break; + case RES_OP_MAP_ICM: + qpn = get_param_l(&in_param) & 0x7fffff; + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED, + NULL, 0); + if (err) + return err; + + if (!valid_reserved(dev, slave, qpn)) + __mlx4_qp_free_icm(dev, qpn); + + res_end_move(dev, slave, RES_QP, qpn); + + if (valid_reserved(dev, slave, qpn)) + err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int base; + int order; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + base = get_param_l(&in_param); + order = get_param_h(&in_param); + err = rem_res_range(dev, slave, base, 1, RES_MTT, order); + if (!err) + __mlx4_free_mtt_range(dev, base, order); + return err; +} + +static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param) +{ + int err = -EINVAL; + int index; + int id; + struct res_mpt *mpt; + + switch (op) { + case RES_OP_RESERVE: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = get_res(dev, slave, id, RES_MPT, &mpt); + if (err) + break; + index = mpt->key; + put_res(dev, slave, id, RES_MPT); + + err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); + if (err) + break; + __mlx4_mr_release(dev, index); + break; + case RES_OP_MAP_ICM: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_RESERVED, &mpt); + if (err) + return err; + + 
__mlx4_mr_free_icm(dev, mpt->key); + res_end_move(dev, slave, RES_MPT, id); + return err; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int cqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + cqn = get_param_l(&in_param); + err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); + if (err) + break; + + __mlx4_cq_free_icm(dev, cqn); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int srqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + srqn = get_param_l(&in_param); + err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); + if (err) + break; + + __mlx4_srq_free_icm(dev, srqn); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int port; + int err = 0; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + port = get_param_l(out_param); + mac_del_from_slave(dev, slave, in_param, port); + __mlx4_unregister_mac(dev, port, in_param); + break; + default: + err = -EINVAL; + break; + } + + return err; + +} + +static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + return 0; +} + +int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = -EINVAL; + int alop = vhcr->op_modifier; + + switch (vhcr->in_modifier) { + case RES_QP: + err = qp_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param); + break; + + case RES_MTT: + err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MPT: + err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param); + break; + + case RES_CQ: + err = cq_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_SRQ: + err = srq_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MAC: + err = mac_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_VLAN: + err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + default: + break; + } + return err; +} + +/* ugly but other choices are uglier */ +static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) +{ + return (be32_to_cpu(mpt->flags) >> 9) & 1; +} + +static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) +{ + return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; +} + +static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->mtt_sz); +} + +static int mr_get_pdn(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->pd_flags) & 0xffffff; +} + +static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; +} + +static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) +{ + return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int qp_get_mtt_size(struct mlx4_qp_context *qpc) +{ + int page_shift = (qpc->log_page_size & 0x3f) + 12; + int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; + int log_sq_sride = qpc->sq_size_stride & 7; + int log_rq_size = 
(qpc->rq_size_stride >> 3) & 0xf; + int log_rq_stride = qpc->rq_size_stride & 7; + int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; + int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; + int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1; + int sq_size; + int rq_size; + int total_pages; + int total_mem; + int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; + + sq_size = 1 << (log_sq_size + log_sq_sride + 4); + rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); + total_mem = sq_size + rq_size; + total_pages = + roundup_pow_of_two((total_mem + (page_offset << 6)) >> + page_shift); + + return total_pages; +} + +static int qp_get_pdn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->pd) & 0xffffff; +} + +static int pdn2slave(int pdn) +{ + return (pdn >> NOT_MASKED_PD_BITS) - 1; +} + +static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, + int size, struct res_mtt *mtt) +{ + int res_start = mtt->com.res_id; + int res_size = (1 << mtt->order); + + if (start < res_start || start + size > res_start + res_size) + return -EPERM; + return 0; +} + +int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier; + struct res_mtt *mtt; + struct res_mpt *mpt; + int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; + int phys; + int id; + + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); + if (err) + return err; + + phys = mr_phys_mpt(inbox->buf); + if (!phys) { + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + + err = check_mtt_range(dev, slave, mtt_base, + mr_get_mtt_size(inbox->buf), mtt); + if (err) + goto ex_put; + + mpt->mtt = mtt; + } + + if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) { + err = -EPERM; + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put; + + if (!phys) { + atomic_inc(&mtt->ref_count); + put_res(dev, slave, mtt->com.res_id, RES_MTT); + } + + res_end_move(dev, slave, RES_MPT, id); + return 0; + +ex_put: + if (!phys) + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_MPT, id); + + return err; +} + +int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier; + struct res_mpt *mpt; + int id; + + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); + if (err) + return err; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + + if (mpt->mtt) + atomic_dec(&mpt->mtt->ref_count); + + res_end_move(dev, slave, RES_MPT, id); + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_MPT, id); + + return err; +} + +int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier; + struct res_mpt *mpt; + int id; + + id = index & mpt_mask(dev); + err = get_res(dev, slave, id, RES_MPT, &mpt); + if (err) + return err; + + if (mpt->com.from_state != RES_MPT_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + +out: + put_res(dev, slave, 
id, RES_MPT); + return err; +} + +static int qp_get_rcqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->cqn_recv) & 0xffffff; +} + +static int qp_get_scqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->cqn_send) & 0xffffff; +} + +static u32 qp_get_srqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->srqn) & 0x1ffffff; +} + +int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_mtt *mtt; + struct res_qp *qp; + struct mlx4_qp_context *qpc = inbox->buf + 8; + int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; + int mtt_size = qp_get_mtt_size(qpc); + struct res_cq *rcq; + struct res_cq *scq; + int rcqn = qp_get_rcqn(qpc); + int scqn = qp_get_scqn(qpc); + u32 srqn = qp_get_srqn(qpc) & 0xffffff; + int use_srq = (qp_get_srqn(qpc) >> 24) & 1; + struct res_srq *srq; + int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); + if (err) + return err; + qp->local_qpn = local_qpn; + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + + err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); + if (err) + goto ex_put_mtt; + + if (pdn2slave(qp_get_pdn(qpc)) != slave) { + err = -EPERM; + goto ex_put_mtt; + } + + err = get_res(dev, slave, rcqn, RES_CQ, &rcq); + if (err) + goto ex_put_mtt; + + if (scqn != rcqn) { + err = get_res(dev, slave, scqn, RES_CQ, &scq); + if (err) + goto ex_put_rcq; + } else + scq = rcq; + + if (use_srq) { + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + goto ex_put_scq; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put_srq; + atomic_inc(&mtt->ref_count); + qp->mtt = mtt; + atomic_inc(&rcq->ref_count); + qp->rcq = rcq; + atomic_inc(&scq->ref_count); + qp->scq = scq; + + if (scqn != rcqn) + put_res(dev, slave, scqn, RES_CQ); + + if (use_srq) { + atomic_inc(&srq->ref_count); + put_res(dev, slave, srqn, RES_SRQ); + qp->srq = srq; + } + put_res(dev, slave, rcqn, RES_CQ); + put_res(dev, slave, mtt_base, RES_MTT); + res_end_move(dev, slave, RES_QP, qpn); + + return 0; + +ex_put_srq: + if (use_srq) + put_res(dev, slave, srqn, RES_SRQ); +ex_put_scq: + if (scqn != rcqn) + put_res(dev, slave, scqn, RES_CQ); +ex_put_rcq: + put_res(dev, slave, rcqn, RES_CQ); +ex_put_mtt: + put_res(dev, slave, mtt_base, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_QP, qpn); + + return err; +} + +static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) +{ + return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int eq_get_mtt_size(struct mlx4_eq_context *eqc) +{ + int log_eq_size = eqc->log_eq_size & 0x1f; + int page_shift = (eqc->log_page_size & 0x3f) + 12; + + if (log_eq_size + 5 < page_shift) + return 1; + + return 1 << (log_eq_size + 5 - page_shift); +} + +static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) +{ + return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int cq_get_mtt_size(struct mlx4_cq_context *cqc) +{ + int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; + int page_shift = (cqc->log_page_size & 0x3f) + 12; + + if (log_cq_size + 5 < page_shift) + return 1; + + return 1 << (log_cq_size + 5 - page_shift); +} + +int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct 
mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int eqn = vhcr->in_modifier; + int res_id = (slave << 8) | eqn; + struct mlx4_eq_context *eqc = inbox->buf; + int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; + int mtt_size = eq_get_mtt_size(eqc); + struct res_eq *eq; + struct res_mtt *mtt; + + err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); + if (err) + return err; + err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); + if (err) + goto out_add; + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto out_move; + + err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); + if (err) + goto out_put; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_put; + + atomic_inc(&mtt->ref_count); + eq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_EQ, res_id); + return 0; + +out_put: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +out_move: + res_abort_move(dev, slave, RES_EQ, res_id); +out_add: + rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); + return err; +} + +static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, + int len, struct res_mtt **res) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_mtt *mtt; + int err = -EINVAL; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], + com.list) { + if (!check_mtt_range(dev, slave, start, len, mtt)) { + *res = mtt; + mtt->com.from_state = mtt->com.state; + mtt->com.state = RES_MTT_BUSY; + err = 0; + break; + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_mtt mtt; + __be64 *page_list = inbox->buf; + u64 *pg_list = (u64 *)page_list; + int i; + struct res_mtt *rmtt = NULL; + int start = be64_to_cpu(page_list[0]); + int npages = vhcr->in_modifier; + int err; + + err = get_containing_mtt(dev, slave, start, npages, &rmtt); + if (err) + return err; + + /* Call the SW implementation of write_mtt: + * - Prepare a dummy mtt struct + * - Translate inbox contents to simple addresses in host endianess */ + mtt.offset = 0; /* TBD this is broken but I don't handle it since + we don't really use it */ + mtt.order = 0; + mtt.page_shift = 0; + for (i = 0; i < npages; ++i) + pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); + + err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, + ((u64 *)page_list + 2)); + + if (rmtt) + put_res(dev, slave, rmtt->com.res_id, RES_MTT); + + return err; +} + +int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int eqn = vhcr->in_modifier; + int res_id = eqn | (slave << 8); + struct res_eq *eq; + int err; + + err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); + if (err) + return err; + + err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); + if (err) + goto ex_abort; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put; + + atomic_dec(&eq->mtt->ref_count); + put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_EQ, res_id); + rem_res_range(dev, slave, res_id, 1, 
RES_EQ, 0); + + return 0; + +ex_put: + put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_EQ, res_id); + + return err; +} + +int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq_info *event_eq; + struct mlx4_cmd_mailbox *mailbox; + u32 in_modifier = 0; + int err; + int res_id; + struct res_eq *req; + + if (!priv->mfunc.master.slave_state) + return -EINVAL; + + event_eq = &priv->mfunc.master.slave_state[slave].event_eq; + + /* Create the event only if the slave is registered */ + if ((event_eq->event_type & (1 << eqe->type)) == 0) + return 0; + + mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); + res_id = (slave << 8) | event_eq->eqn; + err = get_res(dev, slave, res_id, RES_EQ, &req); + if (err) + goto unlock; + + if (req->com.from_state != RES_EQ_HW) { + err = -EINVAL; + goto put; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto put; + } + + if (eqe->type == MLX4_EVENT_TYPE_CMD) { + ++event_eq->token; + eqe->event.cmd.token = cpu_to_be16(event_eq->token); + } + + memcpy(mailbox->buf, (u8 *) eqe, 28); + + in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); + + err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, + MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + put_res(dev, slave, res_id, RES_EQ); + mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + +put: + put_res(dev, slave, res_id, RES_EQ); + +unlock: + mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); + return err; +} + +int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int eqn = vhcr->in_modifier; + int res_id = eqn | (slave << 8); + struct res_eq *eq; + int err; + + err = get_res(dev, slave, res_id, RES_EQ, &eq); + if (err) + return err; + + if (eq->com.from_state != RES_EQ_HW) { + err = -EINVAL; + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + +ex_put: + put_res(dev, slave, res_id, RES_EQ); + return err; +} + +int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int cqn = vhcr->in_modifier; + struct mlx4_cq_context *cqc = inbox->buf; + int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; + struct res_cq *cq; + struct res_mtt *mtt; + + err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); + if (err) + return err; + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto out_move; + err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); + if (err) + goto out_put; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_put; + atomic_inc(&mtt->ref_count); + cq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_CQ, cqn); + return 0; + +out_put: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +out_move: + res_abort_move(dev, slave, RES_CQ, cqn); + return err; +} + +int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int cqn = vhcr->in_modifier; + struct res_cq *cq; + + err = 
cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_move; + atomic_dec(&cq->mtt->ref_count); + res_end_move(dev, slave, RES_CQ, cqn); + return 0; + +out_move: + res_abort_move(dev, slave, RES_CQ, cqn); + return err; +} + +int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int cqn = vhcr->in_modifier; + struct res_cq *cq; + int err; + + err = get_res(dev, slave, cqn, RES_CQ, &cq); + if (err) + return err; + + if (cq->com.from_state != RES_CQ_HW) + goto ex_put; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +ex_put: + put_res(dev, slave, cqn, RES_CQ); + + return err; +} + +static int handle_resize(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd, + struct res_cq *cq) +{ + int err; + struct res_mtt *orig_mtt; + struct res_mtt *mtt; + struct mlx4_cq_context *cqc = inbox->buf; + int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; + + err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); + if (err) + return err; + + if (orig_mtt != cq->mtt) { + err = -EINVAL; + goto ex_put; + } + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_put; + + err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); + if (err) + goto ex_put1; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put1; + atomic_dec(&orig_mtt->ref_count); + put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); + atomic_inc(&mtt->ref_count); + cq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + return 0; + +ex_put1: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_put: + put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); + + return err; + +} + +int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int cqn = vhcr->in_modifier; + struct res_cq *cq; + int err; + + err = get_res(dev, slave, cqn, RES_CQ, &cq); + if (err) + return err; + + if (cq->com.from_state != RES_CQ_HW) + goto ex_put; + + if (vhcr->op_modifier == 0) { + err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); + if (err) + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +ex_put: + put_res(dev, slave, cqn, RES_CQ); + + return err; +} + +static int srq_get_pdn(struct mlx4_srq_context *srqc) +{ + return be32_to_cpu(srqc->pd) & 0xffffff; +} + +static int srq_get_mtt_size(struct mlx4_srq_context *srqc) +{ + int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; + int log_rq_stride = srqc->logstride & 7; + int page_shift = (srqc->log_page_size & 0x3f) + 12; + + if (log_srq_size + log_rq_stride + 4 < page_shift) + return 1; + + return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); +} + +int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_mtt *mtt; + struct res_srq *srq; + struct mlx4_srq_context *srqc = inbox->buf; + int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; + + if (srqn != 
(be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) + return -EINVAL; + + err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); + if (err) + return err; + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), + mtt); + if (err) + goto ex_put_mtt; + + if (pdn2slave(srq_get_pdn(srqc)) != slave) { + err = -EPERM; + goto ex_put_mtt; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put_mtt; + + atomic_inc(&mtt->ref_count); + srq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_SRQ, srqn); + return 0; + +ex_put_mtt: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_SRQ, srqn); + + return err; +} + +int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + atomic_dec(&srq->mtt->ref_count); + if (srq->cq) + atomic_dec(&srq->cq->ref_count); + res_end_move(dev, slave, RES_SRQ, srqn); + + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_SRQ, srqn); + + return err; +} + +int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + return err; + if (srq->com.from_state != RES_SRQ_HW) { + err = -EBUSY; + goto out; + } + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, srqn, RES_SRQ); + return err; +} + +int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + return err; + + if (srq->com.from_state != RES_SRQ_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, srqn, RES_SRQ); + return err; +} + +int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp_context *qpc = inbox->buf + 8; + + update_ud_gid(dev, qpc, (u8)slave); + + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct 
mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + + atomic_dec(&qp->mtt->ref_count); + atomic_dec(&qp->rcq->ref_count); + atomic_dec(&qp->scq->ref_count); + if (qp->srq) + atomic_dec(&qp->srq->ref_count); + res_end_move(dev, slave, RES_QP, qpn); + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_QP, qpn); + + return err; +} + +static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, + struct res_qp *rqp, u8 *gid) +{ + struct res_gid *res; + + list_for_each_entry(res, &rqp->mcg_list, list) { + if (!memcmp(res->gid, gid, 16)) + return res; + } + return NULL; +} + +static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, + u8 *gid, enum mlx4_protocol prot) +{ + struct res_gid *res; + int err; + + res = kzalloc(sizeof *res, GFP_KERNEL); + if (!res) + return -ENOMEM; + + spin_lock_irq(&rqp->mcg_spl); + if (find_gid(dev, slave, rqp, gid)) { + kfree(res); + err = -EEXIST; + } else { + memcpy(res->gid, gid, 16); + res->prot = prot; + list_add_tail(&res->list, &rqp->mcg_list); + err = 0; + } + spin_unlock_irq(&rqp->mcg_spl); + + return err; +} + +static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, + u8 *gid, enum mlx4_protocol prot) +{ + struct res_gid *res; + int err; + + spin_lock_irq(&rqp->mcg_spl); + res = find_gid(dev, slave, rqp, gid); + if (!res || res->prot != prot) + err = -EINVAL; + else { + list_del(&res->list); + kfree(res); + err = 0; + } + spin_unlock_irq(&rqp->mcg_spl); + + return err; +} + +int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp qp; /* dummy for calling attach/detach */ + u8 *gid = inbox->buf; + enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; + int err, err1; + int qpn; + struct res_qp *rqp; + int attach = vhcr->op_modifier; + int block_loopback = vhcr->in_modifier >> 31; + u8 steer_type_mask = 2; + enum mlx4_steer_type type = gid[7] & steer_type_mask; + + qpn = vhcr->in_modifier & 0xffffff; + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) + return err; + + qp.qpn = qpn; + if (attach) { + err = add_mcg_res(dev, slave, rqp, gid, prot); + if (err) + goto ex_put; + + err = mlx4_qp_attach_common(dev, &qp, gid, + block_loopback, prot, type); + if (err) + goto ex_rem; + } else { + err = rem_mcg_res(dev, slave, rqp, gid, prot); + if (err) + goto ex_put; + err = mlx4_qp_detach_common(dev, &qp, gid, prot, type); + } + + put_res(dev, slave, qpn, RES_QP); + return 0; + +ex_rem: + /* ignore error return below, already in error */ + err1 = rem_mcg_res(dev, slave, rqp, gid, prot); +ex_put: + put_res(dev, slave, qpn, RES_QP); + + return err; +} + +enum { + BUSY_MAX_RETRIES = 10 +}; + +int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier & 0xffff; + + err = get_res(dev, slave, index, RES_COUNTER, NULL); + if (err) + return err; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + put_res(dev, slave, index, RES_COUNTER); + return err; +} + +static void 
detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) +{ + struct res_gid *rgid; + struct res_gid *tmp; + int err; + struct mlx4_qp qp; /* dummy for calling attach/detach */ + + list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { + qp.qpn = rqp->local_qpn; + err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, + MLX4_MC_STEER); + list_del(&rgid->list); + kfree(rgid); + } +} + +static int _move_all_busy(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int print) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; + struct res_common *r; + struct res_common *tmp; + int busy; + + busy = 0; + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(r, tmp, rlist, list) { + if (r->owner == slave) { + if (!r->removing) { + if (r->state == RES_ANY_BUSY) { + if (print) + mlx4_dbg(dev, + "%s id 0x%x is busy\n", + ResourceType(type), + r->res_id); + ++busy; + } else { + r->from_state = r->state; + r->state = RES_ANY_BUSY; + r->removing = 1; + } + } + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + return busy; +} + +static int move_all_busy(struct mlx4_dev *dev, int slave, + enum mlx4_resource type) +{ + unsigned long begin; + int busy; + + begin = jiffies; + do { + busy = _move_all_busy(dev, slave, type, 0); + if (time_after(jiffies, begin + 5 * HZ)) + break; + if (busy) + cond_resched(); + } while (busy); + + if (busy) + busy = _move_all_busy(dev, slave, type, 1); + + return busy; +} +static void rem_slave_qps(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + int state; + u64 in_param; + int qpn; + int err; + + err = move_all_busy(dev, slave, RES_QP); + if (err) + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" + "for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (qp->com.owner == slave) { + qpn = qp->com.res_id; + detach_qp(dev, slave, qp); + state = qp->com.from_state; + while (state != 0) { + switch (state) { + case RES_QP_RESERVED: + spin_lock_irq(mlx4_tlock(dev)); + radix_tree_delete(&tracker->res_tree[RES_QP], + qp->com.res_id); + list_del(&qp->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(qp); + state = 0; + break; + case RES_QP_MAPPED: + if (!valid_reserved(dev, slave, qpn)) + __mlx4_qp_free_icm(dev, qpn); + state = RES_QP_RESERVED; + break; + case RES_QP_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, + qp->local_qpn, 2, + MLX4_CMD_2RST_QP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_qps: failed" + " to move slave %d qpn %d to" + " reset\n", slave, + qp->local_qpn); + atomic_dec(&qp->rcq->ref_count); + atomic_dec(&qp->scq->ref_count); + atomic_dec(&qp->mtt->ref_count); + if (qp->srq) + atomic_dec(&qp->srq->ref_count); + state = RES_QP_MAPPED; + break; + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_srqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *srq_list = + 
&tracker->slave_list[slave].res_list[RES_SRQ]; + struct res_srq *srq; + struct res_srq *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int srqn; + int err; + + err = move_all_busy(dev, slave, RES_SRQ); + if (err) + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " + "busy for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(srq, tmp, srq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (srq->com.owner == slave) { + srqn = srq->com.res_id; + state = srq->com.from_state; + while (state != 0) { + switch (state) { + case RES_SRQ_ALLOCATED: + __mlx4_srq_free_icm(dev, srqn); + spin_lock_irq(mlx4_tlock(dev)); + radix_tree_delete(&tracker->res_tree[RES_SRQ], + srqn); + list_del(&srq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(srq); + state = 0; + break; + + case RES_SRQ_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, srqn, 1, + MLX4_CMD_HW2SW_SRQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_srqs: failed" + " to move slave %d srq %d to" + " SW ownership\n", + slave, srqn); + + atomic_dec(&srq->mtt->ref_count); + if (srq->cq) + atomic_dec(&srq->cq->ref_count); + state = RES_SRQ_ALLOCATED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_cqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *cq_list = + &tracker->slave_list[slave].res_list[RES_CQ]; + struct res_cq *cq; + struct res_cq *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int cqn; + int err; + + err = move_all_busy(dev, slave, RES_CQ); + if (err) + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " + "busy for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(cq, tmp, cq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { + cqn = cq->com.res_id; + state = cq->com.from_state; + while (state != 0) { + switch (state) { + case RES_CQ_ALLOCATED: + __mlx4_cq_free_icm(dev, cqn); + spin_lock_irq(mlx4_tlock(dev)); + radix_tree_delete(&tracker->res_tree[RES_CQ], + cqn); + list_del(&cq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(cq); + state = 0; + break; + + case RES_CQ_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, cqn, 1, + MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_cqs: failed" + " to move slave %d cq %d to" + " SW ownership\n", + slave, cqn); + atomic_dec(&cq->mtt->ref_count); + state = RES_CQ_ALLOCATED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_mrs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mpt_list = + &tracker->slave_list[slave].res_list[RES_MPT]; + struct res_mpt *mpt; + struct res_mpt *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int mptn; + int err; + + err = move_all_busy(dev, slave, RES_MPT); + if (err) + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " + "busy for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (mpt->com.owner == slave) { + 
mptn = mpt->com.res_id; + state = mpt->com.from_state; + while (state != 0) { + switch (state) { + case RES_MPT_RESERVED: + __mlx4_mr_release(dev, mpt->key); + spin_lock_irq(mlx4_tlock(dev)); + radix_tree_delete(&tracker->res_tree[RES_MPT], + mptn); + list_del(&mpt->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(mpt); + state = 0; + break; + + case RES_MPT_MAPPED: + __mlx4_mr_free_icm(dev, mpt->key); + state = RES_MPT_RESERVED; + break; + + case RES_MPT_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, mptn, 0, + MLX4_CMD_HW2SW_MPT, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_mrs: failed" + " to move slave %d mpt %d to" + " SW ownership\n", + slave, mptn); + if (mpt->mtt) + atomic_dec(&mpt->mtt->ref_count); + state = RES_MPT_MAPPED; + break; + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_mtts(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *mtt_list = + &tracker->slave_list[slave].res_list[RES_MTT]; + struct res_mtt *mtt; + struct res_mtt *tmp; + int state; + LIST_HEAD(tlist); + int base; + int err; + + err = move_all_busy(dev, slave, RES_MTT); + if (err) + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " + "busy for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (mtt->com.owner == slave) { + base = mtt->com.res_id; + state = mtt->com.from_state; + while (state != 0) { + switch (state) { + case RES_MTT_ALLOCATED: + __mlx4_free_mtt_range(dev, base, + mtt->order); + spin_lock_irq(mlx4_tlock(dev)); + radix_tree_delete(&tracker->res_tree[RES_MTT], + base); + list_del(&mtt->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(mtt); + state = 0; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_eqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *eq_list = + &tracker->slave_list[slave].res_list[RES_EQ]; + struct res_eq *eq; + struct res_eq *tmp; + int err; + int state; + LIST_HEAD(tlist); + int eqn; + struct mlx4_cmd_mailbox *mailbox; + + err = move_all_busy(dev, slave, RES_EQ); + if (err) + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " + "busy for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(eq, tmp, eq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (eq->com.owner == slave) { + eqn = eq->com.res_id; + state = eq->com.from_state; + while (state != 0) { + switch (state) { + case RES_EQ_RESERVED: + spin_lock_irq(mlx4_tlock(dev)); + radix_tree_delete(&tracker->res_tree[RES_EQ], + eqn); + list_del(&eq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(eq); + state = 0; + break; + + case RES_EQ_HW: + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + cond_resched(); + continue; + } + err = mlx4_cmd_box(dev, slave, 0, + eqn & 0xff, 0, + MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + mlx4_dbg(dev, "rem_slave_eqs: failed" + " to move slave %d eqs %d to" + " SW ownership\n", slave, eqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (!err) { + atomic_dec(&eq->mtt->ref_count); + state = 
RES_EQ_RESERVED; + } + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); + /*VLAN*/ + rem_slave_macs(dev, slave); + rem_slave_qps(dev, slave); + rem_slave_srqs(dev, slave); + rem_slave_cqs(dev, slave); + rem_slave_mrs(dev, slave); + rem_slave_eqs(dev, slave); + rem_slave_mtts(dev, slave); + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c index e2337a7411d..80249829352 100644 --- a/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, int err = 0; err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, - MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); if (err) { mlx4_err(dev, "Sense command failed for port: %d\n", port); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index 9cbf3fce014..2823fffc638 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -31,6 +31,8 @@ * SOFTWARE. */ +#include <linux/init.h> + #include <linux/mlx4/cmd.h> #include <linux/export.h> #include <linux/gfp.h> @@ -38,26 +40,6 @@ #include "mlx4.h" #include "icm.h" -struct mlx4_srq_context { - __be32 state_logsize_srqn; - u8 logstride; - u8 reserved1; - __be16 xrcd; - __be32 pg_offset_cqn; - u32 reserved2; - u8 log_page_size; - u8 reserved3[2]; - u8 mtt_base_addr_h; - __be32 mtt_base_addr_l; - __be32 pd; - __be16 limit_watermark; - __be16 wqe_cnt; - u16 reserved4; - __be16 wqe_counter; - u32 reserved5; - __be64 db_rec_addr; -}; - void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; @@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int srq_num) { - return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ, - MLX4_CMD_TIME_CLASS_A); + return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0, + MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox { return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num, mailbox ? 
0 : 1, MLX4_CMD_HW2SW_SRQ, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) { return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ, - MLX4_CMD_TIME_CLASS_B); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int srq_num) { return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ, - MLX4_CMD_TIME_CLASS_A); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } -int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, - struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) +int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_srq_context *srq_context; - u64 mtt_addr; int err; - srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap); - if (srq->srqn == -1) + + *srqn = mlx4_bitmap_alloc(&srq_table->bitmap); + if (*srqn == -1) return -ENOMEM; - err = mlx4_table_get(dev, &srq_table->table, srq->srqn); + err = mlx4_table_get(dev, &srq_table->table, *srqn); if (err) goto err_out; - err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn); + err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn); if (err) goto err_put; + return 0; + +err_put: + mlx4_table_put(dev, &srq_table->table, *srqn); + +err_out: + mlx4_bitmap_free(&srq_table->bitmap, *srqn); + return err; +} + +static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *srqn = get_param_l(&out_param); + + return err; + } + return __mlx4_srq_alloc_icm(dev, srqn); +} + +void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + + mlx4_table_put(dev, &srq_table->cmpt_table, srqn); + mlx4_table_put(dev, &srq_table->table, srqn); + mlx4_bitmap_free(&srq_table->bitmap, srqn); +} + +static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) +{ + u64 in_param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, srqn); + if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed freeing cq:%d\n", srqn); + return; + } + __mlx4_srq_free_icm(dev, srqn); +} + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + u64 mtt_addr; + int err; + + err = mlx4_srq_alloc_icm(dev, &srq->srqn); + if (err) + return err; spin_lock_irq(&srq_table->lock); err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); spin_unlock_irq(&srq_table->lock); if (err) - goto err_cmpt_put; + goto err_icm; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { @@ -174,15 +218,8 @@ err_radix: radix_tree_delete(&srq_table->tree, srq->srqn); spin_unlock_irq(&srq_table->lock); -err_cmpt_put: - mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn); - -err_put: - mlx4_table_put(dev, &srq_table->table, srq->srqn); - -err_out: - mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); - +err_icm: + mlx4_srq_free_icm(dev, srq->srqn); return err; } 
EXPORT_SYMBOL_GPL(mlx4_srq_alloc); @@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) complete(&srq->free); wait_for_completion(&srq->free); - mlx4_table_put(dev, &srq_table->table, srq->srqn); - mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); + mlx4_srq_free_icm(dev, srq->srqn); } EXPORT_SYMBOL_GPL(mlx4_srq_free); @@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) spin_lock_init(&srq_table->lock); INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); @@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) void mlx4_cleanup_srq_table(struct mlx4_dev *dev) { + if (mlx4_is_slave(dev)) + return; mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); } diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index d10c2e15f4e..1ea811cf515 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -42,6 +42,8 @@ config KS8851 select NET_CORE select MII select CRC32 + select MISC_DEVICES + select EEPROM_93CX6 ---help--- SPI driver for Micrel KS8851 SPI attached network chip. diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 4a6ae057e3b..75ec87a822b 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = { .remove = ks8842_remove, }; -static int __init ks8842_init(void) -{ - return platform_driver_register(&ks8842_platform_driver); -} - -static void __exit ks8842_exit(void) -{ - platform_driver_unregister(&ks8842_platform_driver); -} - -module_init(ks8842_init); -module_exit(ks8842_exit); +module_platform_driver(ks8842_platform_driver); MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index f56743a28fc..6b35e7da9a9 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -22,6 +22,7 @@ #include <linux/cache.h> #include <linux/crc32.h> #include <linux/mii.h> +#include <linux/eeprom_93cx6.h> #include <linux/spi/spi.h> @@ -82,6 +83,7 @@ union ks8851_tx_hdr { * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom + * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * * The @lock ensures that the chip is protected when certain operations are * in progress. When the read or write packet transfer is in progress, most @@ -128,6 +130,8 @@ struct ks8851_net { struct spi_message spi_msg2; struct spi_transfer spi_xfer1; struct spi_transfer spi_xfer2[2]; + + struct eeprom_93cx6 eeprom; }; static int msg_enable; @@ -343,6 +347,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op) } /** + * ks8851_set_powermode - set power mode of the device + * @ks: The device state + * @pwrmode: The power mode value to write to KS_PMECR. + * + * Change the power mode of the chip. 
+ */ +static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) +{ + unsigned pmecr; + + netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); + + pmecr = ks8851_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_PM_MASK; + pmecr |= pwrmode; + + ks8851_wrreg16(ks, KS_PMECR, pmecr); +} + +/** * ks8851_write_mac_addr - write mac address to device registers * @dev: The network device * @@ -358,8 +382,15 @@ static int ks8851_write_mac_addr(struct net_device *dev) mutex_lock(&ks->lock); + /* + * Wake up chip in case it was powered off when stopped; otherwise, + * the first write to the MAC address does not take effect. + */ + ks8851_set_powermode(ks, PMECR_PM_NORMAL); for (i = 0; i < ETH_ALEN; i++) ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]); + if (!netif_running(dev)) + ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); mutex_unlock(&ks->lock); @@ -367,21 +398,47 @@ static int ks8851_write_mac_addr(struct net_device *dev) } /** + * ks8851_read_mac_addr - read mac address from device registers + * @dev: The network device + * + * Update our copy of the KS8851 MAC address from the registers of @dev. +*/ +static void ks8851_read_mac_addr(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + int i; + + mutex_lock(&ks->lock); + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i)); + + mutex_unlock(&ks->lock); +} + +/** * ks8851_init_mac - initialise the mac address * @ks: The device structure * * Get or create the initial mac address for the device and then set that - * into the station address register. Currently we assume that the device - * does not have a valid mac address in it, and so we use random_ether_addr() + * into the station address register. If there is an EEPROM present, then + * we try that. If no valid mac address is found we use random_ether_addr() * to create a new one. - * - * In future, the driver should check to see if the device has an EEPROM - * attached and whether that has a valid ethernet address in it. */ static void ks8851_init_mac(struct ks8851_net *ks) { struct net_device *dev = ks->netdev; + /* first, try reading what we've got already */ + if (ks->rc_ccr & CCR_EEPROM) { + ks8851_read_mac_addr(dev); + if (is_valid_ether_addr(dev->dev_addr)) + return; + + netdev_err(ks->netdev, "invalid mac address read %pM\n", + dev->dev_addr); + } + random_ether_addr(dev->dev_addr); ks8851_write_mac_addr(dev); } @@ -739,26 +796,6 @@ static void ks8851_tx_work(struct work_struct *work) } /** - * ks8851_set_powermode - set power mode of the device - * @ks: The device state - * @pwrmode: The power mode value to write to KS_PMECR. - * - * Change the power mode of the chip. - */ -static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) -{ - unsigned pmecr; - - netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); - - pmecr = ks8851_rdreg16(ks, KS_PMECR); - pmecr &= ~PMECR_PM_MASK; - pmecr |= pwrmode; - - ks8851_wrreg16(ks, KS_PMECR, pmecr); -} - -/** * ks8851_net_open - open network device * @dev: The network device being opened. * @@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/* Companion eeprom access */ - -enum { /* EEPROM programming states */ - EEPROM_CONTROL, - EEPROM_ADDRESS, - EEPROM_DATA, - EEPROM_COMPLETE -}; - -/** - * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM - * @dev: The network device the PHY is on. 
- * @addr: EEPROM address to read - * - * eeprom_size: used to define the data coding length. Can be changed - * through debug-fs. - * - * Programs a read on the EEPROM using ks8851 EEPROM SW access feature. - * Warning: The READ feature is not supported on ks8851 revision 0. - * - * Rough programming model: - * - on period start: set clock high and read value on bus - * - on period / 2: set clock low and program value on bus - * - start on period / 2 - */ -unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr) -{ - struct ks8851_net *ks = netdev_priv(dev); - int eepcr; - int ctrl = EEPROM_OP_READ; - int state = EEPROM_CONTROL; - int bit_count = EEPROM_OP_LEN - 1; - unsigned int data = 0; - int dummy; - unsigned int addr_len; - - addr_len = (ks->eeprom_size == 128) ? 6 : 8; - - /* start transaction: chip select high, authorize write */ - mutex_lock(&ks->lock); - eepcr = EEPCR_EESA | EEPCR_EESRWA; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - eepcr |= EEPCR_EECS; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - while (state != EEPROM_COMPLETE) { - /* falling clock period starts... */ - /* set EED_IO pin for control and address */ - eepcr &= ~EEPCR_EEDO; - switch (state) { - case EEPROM_CONTROL: - eepcr |= ((ctrl >> bit_count) & 1) << 2; - if (bit_count-- <= 0) { - bit_count = addr_len - 1; - state = EEPROM_ADDRESS; - } - break; - case EEPROM_ADDRESS: - eepcr |= ((addr >> bit_count) & 1) << 2; - bit_count--; - break; - case EEPROM_DATA: - /* Change to receive mode */ - eepcr &= ~EEPCR_EESRWA; - break; - } - - /* lower clock */ - eepcr &= ~EEPCR_EESCK; - - mutex_lock(&ks->lock); - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - /* waitread period / 2 */ - udelay(EEPROM_SK_PERIOD / 2); - - /* rising clock period starts... */ - - /* raise clock */ - mutex_lock(&ks->lock); - eepcr |= EEPCR_EESCK; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - /* Manage read */ - switch (state) { - case EEPROM_ADDRESS: - if (bit_count < 0) { - bit_count = EEPROM_DATA_LEN - 1; - state = EEPROM_DATA; - } - break; - case EEPROM_DATA: - mutex_lock(&ks->lock); - dummy = ks8851_rdreg16(ks, KS_EEPCR); - mutex_unlock(&ks->lock); - data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count; - if (bit_count-- <= 0) - state = EEPROM_COMPLETE; - break; - } - - /* wait period / 2 */ - udelay(EEPROM_SK_PERIOD / 2); - } - - /* close transaction */ - mutex_lock(&ks->lock); - eepcr &= ~EEPCR_EECS; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - eepcr = 0; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - return data; -} - -/** - * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM - * @dev: The network device the PHY is on. - * @op: operand (can be WRITE, EWEN, EWDS) - * @addr: EEPROM address to write - * @data: data to write - * - * eeprom_size: used to define the data coding length. Can be changed - * through debug-fs. - * - * Programs a write on the EEPROM using ks8851 EEPROM SW access feature. - * - * Note that a write enable is required before writing data. - * - * Rough programming model: - * - on period start: set clock high - * - on period / 2: set clock low and program value on bus - * - start on period / 2 - */ -void ks8851_eeprom_write(struct net_device *dev, unsigned int op, - unsigned int addr, unsigned int data) -{ - struct ks8851_net *ks = netdev_priv(dev); - int eepcr; - int state = EEPROM_CONTROL; - int bit_count = EEPROM_OP_LEN - 1; - unsigned int addr_len; - - addr_len = (ks->eeprom_size == 128) ? 
6 : 8; - - switch (op) { - case EEPROM_OP_EWEN: - addr = 0x30; - break; - case EEPROM_OP_EWDS: - addr = 0; - break; - } - - /* start transaction: chip select high, authorize write */ - mutex_lock(&ks->lock); - eepcr = EEPCR_EESA | EEPCR_EESRWA; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - eepcr |= EEPCR_EECS; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - while (state != EEPROM_COMPLETE) { - /* falling clock period starts... */ - /* set EED_IO pin for control and address */ - eepcr &= ~EEPCR_EEDO; - switch (state) { - case EEPROM_CONTROL: - eepcr |= ((op >> bit_count) & 1) << 2; - if (bit_count-- <= 0) { - bit_count = addr_len - 1; - state = EEPROM_ADDRESS; - } - break; - case EEPROM_ADDRESS: - eepcr |= ((addr >> bit_count) & 1) << 2; - if (bit_count-- <= 0) { - if (op == EEPROM_OP_WRITE) { - bit_count = EEPROM_DATA_LEN - 1; - state = EEPROM_DATA; - } else { - state = EEPROM_COMPLETE; - } - } - break; - case EEPROM_DATA: - eepcr |= ((data >> bit_count) & 1) << 2; - if (bit_count-- <= 0) - state = EEPROM_COMPLETE; - break; - } - - /* lower clock */ - eepcr &= ~EEPCR_EESCK; - - mutex_lock(&ks->lock); - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - /* wait period / 2 */ - udelay(EEPROM_SK_PERIOD / 2); - - /* rising clock period starts... */ - - /* raise clock */ - eepcr |= EEPCR_EESCK; - mutex_lock(&ks->lock); - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - - /* wait period / 2 */ - udelay(EEPROM_SK_PERIOD / 2); - } - - /* close transaction */ - mutex_lock(&ks->lock); - eepcr &= ~EEPCR_EECS; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - eepcr = 0; - ks8851_wrreg16(ks, KS_EEPCR, eepcr); - mutex_unlock(&ks->lock); - -} - /* ethtool support */ static void ks8851_get_drvinfo(struct net_device *dev, @@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev) return mii_nway_restart(&ks->mii); } -static int ks8851_get_eeprom_len(struct net_device *dev) -{ - struct ks8851_net *ks = netdev_priv(dev); - return ks->eeprom_size; -} +/* EEPROM support */ -static int ks8851_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *bytes) +static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee) { - struct ks8851_net *ks = netdev_priv(dev); - u16 *eeprom_buff; - int first_word; - int last_word; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; + struct ks8851_net *ks = ee->data; + unsigned val; - if (eeprom->len > ks->eeprom_size) - return -EINVAL; + val = ks8851_rdreg16(ks, KS_EEPCR); - eeprom->magic = ks8851_rdreg16(ks, KS_CIDER); + ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0; + ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0; + ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0; +} - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; +static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee) +{ + struct ks8851_net *ks = ee->data; + unsigned val = EEPCR_EESA; /* default - eeprom access on */ + + if (ee->drive_data) + val |= EEPCR_EESRWA; + if (ee->reg_data_in) + val |= EEPCR_EEDO; + if (ee->reg_data_clock) + val |= EEPCR_EESCK; + if (ee->reg_chip_select) + val |= EEPCR_EECS; + + ks8851_wrreg16(ks, KS_EEPCR, val); +} - eeprom_buff = kmalloc(sizeof(u16) * - (last_word - first_word + 1), GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; +/** + * ks8851_eeprom_claim - claim device EEPROM and activate the interface + * @ks: The network device state. + * + * Check for the presence of an EEPROM, and then activate software access + * to the device. 
+ */ +static int ks8851_eeprom_claim(struct ks8851_net *ks) +{ + if (!(ks->rc_ccr & CCR_EEPROM)) + return -ENOENT; - for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1); + mutex_lock(&ks->lock); - /* Device's eeprom is little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); + /* start with clock low, cs high */ + ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS); + return 0; +} - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); - kfree(eeprom_buff); +/** + * ks8851_eeprom_release - release the EEPROM interface + * @ks: The device state + * + * Release the software access to the device EEPROM + */ +static void ks8851_eeprom_release(struct ks8851_net *ks) +{ + unsigned val = ks8851_rdreg16(ks, KS_EEPCR); - return ret_val; + ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA); + mutex_unlock(&ks->lock); } +#define KS_EEPROM_MAGIC (0x00008851) + static int ks8851_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *ee, u8 *data) { struct ks8851_net *ks = netdev_priv(dev); - u16 *eeprom_buff; - void *ptr; - int max_len; - int first_word; - int last_word; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EOPNOTSUPP; - - if (eeprom->len > ks->eeprom_size) + int offset = ee->offset; + int len = ee->len; + u16 tmp; + + /* currently only support byte writing */ + if (len != 1) return -EINVAL; - if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER)) - return -EFAULT; + if (ee->magic != KS_EEPROM_MAGIC) + return -EINVAL; - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - max_len = (last_word - first_word + 1) * 2; - eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; + if (ks8851_eeprom_claim(ks)) + return -ENOENT; - ptr = (void *)eeprom_buff; + eeprom_93cx6_wren(&ks->eeprom, true); - if (eeprom->offset & 1) { - /* need read/modify/write of first changed EEPROM word */ - /* only the second byte of the word is being modified */ - eeprom_buff[0] = ks8851_eeprom_read(dev, first_word); - ptr++; + /* ethtool currently only supports writing bytes, which means + * we have to read/modify/write our 16bit EEPROMs */ + + eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp); + + if (offset & 1) { + tmp &= 0xff; + tmp |= *data << 8; + } else { + tmp &= 0xff00; + tmp |= *data; } - if ((eeprom->offset + eeprom->len) & 1) - /* need read/modify/write of last changed EEPROM word */ - /* only the first byte of the word is being modified */ - eeprom_buff[last_word - first_word] = - ks8851_eeprom_read(dev, last_word); + eeprom_93cx6_write(&ks->eeprom, offset/2, tmp); + eeprom_93cx6_wren(&ks->eeprom, false); - /* Device's eeprom is little-endian, word addressable */ - le16_to_cpus(&eeprom_buff[0]); - le16_to_cpus(&eeprom_buff[last_word - first_word]); + ks8851_eeprom_release(ks); - memcpy(ptr, bytes, eeprom->len); + return 0; +} - for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); +static int ks8851_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct ks8851_net *ks = netdev_priv(dev); + int offset = ee->offset; + int len = ee->len; - ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0); + /* must be 2 byte aligned */ + if (len & 1 || offset & 1) + return -EINVAL; - for (i = 0; i < last_word - first_word + 1; i++) { - ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i, - 
eeprom_buff[i]); - mdelay(EEPROM_WRITE_TIME); - } + if (ks8851_eeprom_claim(ks)) + return -ENOENT; + + ee->magic = KS_EEPROM_MAGIC; - ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0); + eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2); + ks8851_eeprom_release(ks); - kfree(eeprom_buff); - return ret_val; + return 0; +} + +static int ks8851_get_eeprom_len(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + + /* currently, we assume it is an 93C46 attached, so return 128 */ + return ks->rc_ccr & CCR_EEPROM ? 128 : 0; } static const struct ethtool_ops ks8851_ethtool_ops = { @@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi) spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2); spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2); + /* setup EEPROM state */ + + ks->eeprom.data = ks; + ks->eeprom.width = PCI_EEPROM_WIDTH_93C46; + ks->eeprom.register_read = ks8851_eeprom_regread; + ks->eeprom.register_write = ks8851_eeprom_regwrite; + /* setup mii state */ ks->mii.dev = ndev; ks->mii.phy_id = 1, @@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi) goto err_netdev; } - netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n", + netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), - ndev->dev_addr, ndev->irq); + ndev->dev_addr, ndev->irq, + ks->rc_ccr & CCR_EEPROM ? "has" : "no"); return 0; diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index 537fb06e593..b0fae86aaca 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -16,7 +16,7 @@ #define CCR_32PIN (1 << 0) /* MAC address registers */ -#define KS_MAR(_m) 0x15 - (_m) +#define KS_MAR(_m) (0x15 - (_m)) #define KS_MARL 0x10 #define KS_MARM 0x12 #define KS_MARH 0x14 @@ -27,22 +27,11 @@ #define KS_EEPCR 0x22 #define EEPCR_EESRWA (1 << 5) #define EEPCR_EESA (1 << 4) -#define EEPCR_EESB_OFFSET 3 -#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET) +#define EEPCR_EESB (1 << 3) #define EEPCR_EEDO (1 << 2) #define EEPCR_EESCK (1 << 1) #define EEPCR_EECS (1 << 0) -#define EEPROM_OP_LEN 3 /* bits:*/ -#define EEPROM_OP_READ 0x06 -#define EEPROM_OP_EWEN 0x04 -#define EEPROM_OP_WRITE 0x05 -#define EEPROM_OP_EWDS 0x14 - -#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */ -#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */ -#define EEPROM_SK_PERIOD 400 /* in us */ - #define KS_MBIR 0x24 #define MBIR_TXMBF (1 << 12) #define MBIR_TXMBFA (1 << 11) diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index d19c849059d..e58e78e5c93 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1500,8 +1500,7 @@ static int ks_hw_init(struct ks_net *ks) ks->all_mcast = 0; ks->mcast_lst_size = 0; - ks->frame_head_info = (struct type_frame_head *) \ - kmalloc(MHEADER_SIZE, GFP_KERNEL); + ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL); if (!ks->frame_head_info) { pr_err("Error: Fail to allocate frame memory\n"); return false; @@ -1659,18 +1658,7 @@ static struct platform_driver ks8851_platform_driver = { .remove = __devexit_p(ks8851_remove), }; -static int __init ks8851_init(void) -{ - return platform_driver_register(&ks8851_platform_driver); -} - -static void __exit ks8851_exit(void) -{ - platform_driver_unregister(&ks8851_platform_driver); -} - -module_init(ks8851_init); -module_exit(ks8851_exit); +module_platform_driver(ks8851_platform_driver); 
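[Editor's illustration, not part of the commit above.] Several drivers in this series (ks8842, ks8851_mll, jazzsonic, macsonic, xtsonic, w90p910) drop their hand-written module init/exit pairs in favour of module_platform_driver(), which generates the register/unregister boilerplate. A minimal sketch of a converted driver is shown below; the "foo" names are placeholders and do not correspond to any driver touched by this commit.

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* bind to the device; resources would be claimed here */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* release whatever probe() set up */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

/* Replaces the old __init/__exit pair that only called
 * platform_driver_register()/platform_driver_unregister(). */
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");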
MODULE_DESCRIPTION("KS8851 MLL Network driver"); MODULE_AUTHOR("David Choi <david.choi@micrel.com>"); diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 7ece990381c..6ed09a85f03 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -743,8 +743,7 @@ /* Change default LED mode. */ #define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT -#define MAC_ADDR_LEN 6 -#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i)) +#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i)) #define MAX_ETHERNET_BODY_SIZE 1500 #define ETHERNET_HEADER_SIZE 14 @@ -1043,7 +1042,7 @@ enum { * @valid: Valid setting indicating the entry is being used. */ struct ksz_mac_table { - u8 mac_addr[MAC_ADDR_LEN]; + u8 mac_addr[ETH_ALEN]; u16 vid; u8 fid; u8 ports; @@ -1187,8 +1186,8 @@ struct ksz_switch { u8 diffserv[DIFFSERV_ENTRIES]; u8 p_802_1p[PRIO_802_1P_ENTRIES]; - u8 br_addr[MAC_ADDR_LEN]; - u8 other_addr[MAC_ADDR_LEN]; + u8 br_addr[ETH_ALEN]; + u8 other_addr[ETH_ALEN]; u8 broad_per; u8 member; @@ -1292,14 +1291,14 @@ struct ksz_hw { int tx_int_mask; int tx_size; - u8 perm_addr[MAC_ADDR_LEN]; - u8 override_addr[MAC_ADDR_LEN]; - u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN]; + u8 perm_addr[ETH_ALEN]; + u8 override_addr[ETH_ALEN]; + u8 address[ADDITIONAL_ENTRIES][ETH_ALEN]; u8 addr_list_size; u8 mac_override; u8 promiscuous; u8 all_multi; - u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN]; + u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN]; u8 multi_bits[HW_MULTICAST_SIZE]; u8 multi_list_size; @@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw) static const u8 mask[] = { 0x3F }; static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; - hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern); + hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern); } /** @@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw) { static const u8 mask[] = { 0x3F }; - hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr); + hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr); } /** @@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw) { int i; - for (i = 0; i < MAC_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) writeb(hw->override_addr[MAC_ADDR_ORDER(i)], hw->io + KS884X_ADDR_0_OFFSET + i); @@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw) { int i; - for (i = 0; i < MAC_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + KS884X_ADDR_0_OFFSET + i); if (!hw->mac_override) { - memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN); + memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); if (empty_addr(hw->override_addr)) { - memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, - MAC_ADDR_LEN); + memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, - MAC_ADDR_LEN); + ETH_ALEN); hw->override_addr[5] += hw->id; hw_set_addr(hw); } @@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) int i; int j = ADDITIONAL_ENTRIES; - if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN)) + if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN)) return 0; for (i = 0; i < hw->addr_list_size; i++) { - if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) + if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) return 0; if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) j = i; } if (j < ADDITIONAL_ENTRIES) { - memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN); + memcpy(hw->address[j], mac_addr, ETH_ALEN); 
hw_ena_add_addr(hw, j, hw->address[j]); return 0; } @@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) int i; for (i = 0; i < hw->addr_list_size; i++) { - if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) { - memset(hw->address[i], 0, MAC_ADDR_LEN); + if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) { + memset(hw->address[i], 0, ETH_ALEN); writel(0, hw->io + ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI); return 0; @@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info) */ static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit) { - desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc, - GFP_KERNEL); + desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc, + GFP_KERNEL); if (!desc_info->ring) return 1; - memset((void *) desc_info->ring, 0, - sizeof(struct ksz_desc) * desc_info->alloc); hw_init_desc(desc_info, transmit); return 0; } @@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) hw_del_addr(hw, dev->dev_addr); else { hw->mac_override = 1; - memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN); + memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); } memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); @@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev) netdev_for_each_mc_addr(ha, dev) { if (i >= MAX_MULTICAST_LIST) break; - memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN); + memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN); } hw->multi_list_size = (u8) i; hw_set_grp_addr(hw); @@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev, struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(hw_priv->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(hw_priv->pdev), + sizeof(info->bus_info)); } /** @@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev, * * Return 0 if successful; otherwise an error code. */ -static int netdev_set_features(struct net_device *dev, u32 features) +static int netdev_set_features(struct net_device *dev, + netdev_features_t features) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; @@ -6609,7 +6607,7 @@ static int netdev_set_features(struct net_device *dev, u32 features) return 0; } -static struct ethtool_ops netdev_ethtool_ops = { +static const struct ethtool_ops netdev_ethtool_ops = { .get_settings = netdev_get_settings, .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, @@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) int num; i = j = num = got_num = 0; - while (j < MAC_ADDR_LEN) { + while (j < ETH_ALEN) { if (macaddr[i]) { int digit; @@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) } i++; } - if (MAC_ADDR_LEN == j) { + if (ETH_ALEN == j) { if (MAIN_PORT == port) hw_priv->hw.mac_override = 1; } @@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev, /* Multiple device interfaces mode requires a second MAC address. 
*/ if (hw->dev_count > 1) { - memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN); + memcpy(sw->other_addr, hw->override_addr, ETH_ALEN); read_other_addr(hw); if (mac1addr[0] != ':') get_mac_addr(hw_priv, mac1addr, OTHER_PORT); @@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev, dev->irq = pdev->irq; if (MAIN_PORT == i) memcpy(dev->dev_addr, hw_priv->hw.override_addr, - MAC_ADDR_LEN); + ETH_ALEN); else { - memcpy(dev->dev_addr, sw->other_addr, - MAC_ADDR_LEN); + memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); if (!memcmp(sw->other_addr, hw->override_addr, - MAC_ADDR_LEN)) + ETH_ALEN)) dev->dev_addr[5] += port->first_port; } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 0778edcf7b9..20b72ecb020 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) * access to avoid theoretical race condition with functions that * change NETIF_F_LRO flag at runtime. */ - bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; + bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO); while (rx_done->entry[idx].length != 0 && work_done < budget) { length = ntohs(rx_done->entry[idx].length); @@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) return 0; } -static u32 myri10ge_fix_features(struct net_device *dev, u32 features) +static netdev_features_t myri10ge_fix_features(struct net_device *dev, + netdev_features_t features) { if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h index 11be150e4d6..b7fc26c4f73 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h @@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_DCA_OFFSET = 56, /* offset of dca control for WDMAs */ - /* VMWare NetQueue commands */ + /* VMware NetQueue commands */ MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57, MXGEFW_CMD_NETQ_ADD_FILTER = 58, /* data0 = filter_id << 16 | queue << 8 | type */ diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index fc7c6a932ad..5b89fd377ae 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = { }, }; -static int __init jazz_sonic_init_module(void) -{ - return platform_driver_register(&jazz_sonic_driver); -} - -static void __exit jazz_sonic_cleanup_module(void) -{ - platform_driver_unregister(&jazz_sonic_driver); -} - -module_init(jazz_sonic_init_module); -module_exit(jazz_sonic_cleanup_module); +module_platform_driver(jazz_sonic_driver); diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index a2eacbfb425..70367d76fc8 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -643,15 +643,4 @@ static struct platform_driver mac_sonic_driver = { }, }; -static int __init mac_sonic_init_module(void) -{ - return platform_driver_register(&mac_sonic_driver); -} - -static void __exit mac_sonic_cleanup_module(void) -{ - platform_driver_unregister(&mac_sonic_driver); -} - -module_init(mac_sonic_init_module); -module_exit(mac_sonic_cleanup_module); 
+module_platform_driver(mac_sonic_driver); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 6ca047aab79..ac7b16b6e7a 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN); - strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int get_regs_len(struct net_device *dev) diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 2b8f64ddfb5..c24b46cbfe2 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev, static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ns83820 *dev = PRIV(ndev); - strcpy(info->driver, "ns83820"); - strcpy(info->version, VERSION); - strcpy(info->bus_info, pci_name(dev->pci_dev)); + strlcpy(info->driver, "ns83820", sizeof(info->driver)); + strlcpy(info->version, VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info)); } static u32 ns83820_get_link(struct net_device *ndev) diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index ccf61b9da8d..e01c0a07a93 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = { }, }; -static int __init xtsonic_init(void) -{ - return platform_driver_register(&xtsonic_driver); -} - -static void __exit xtsonic_cleanup(void) -{ - platform_driver_unregister(&xtsonic_driver); -} - -module_init(xtsonic_init); -module_exit(xtsonic_cleanup); +module_platform_driver(xtsonic_driver); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index c27fb3dda9f..97f63e12d86 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, { struct s2io_nic *sp = netdev_priv(dev); - strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); - strncpy(info->version, s2io_driver_version, sizeof(info->version)); - strncpy(info->fw_version, "", sizeof(info->fw_version)); - strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); + strlcpy(info->driver, s2io_driver_name, sizeof(info->driver)); + strlcpy(info->version, s2io_driver_version, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); info->regdump_len = XENA_REG_SPACE; info->eedump_len = XENA_EEPROM_SPACE; } @@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev, } } -static int s2io_set_features(struct net_device *dev, u32 features) +static int s2io_set_features(struct net_device *dev, netdev_features_t features) { struct s2io_nic *sp = netdev_priv(dev); - u32 changed = (features ^ dev->features) & NETIF_F_LRO; + netdev_features_t changed = (features ^ dev->features) & 
NETIF_F_LRO; if (changed && netif_running(dev)) { int rc; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index a83197d757c..ef76725454d 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data) mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); } -static u32 vxge_fix_features(struct net_device *dev, u32 features) +static netdev_features_t vxge_fix_features(struct net_device *dev, + netdev_features_t features) { - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; /* Enabling RTH requires some of the logic in vxge_device_register and a * vpath reset. Due to these restrictions, only allow modification @@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features) return features; } -static int vxge_set_features(struct net_device *dev, u32 features) +static int vxge_set_features(struct net_device *dev, netdev_features_t features) { struct vxgedev *vdev = netdev_priv(dev); - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; if (!(changed & NETIF_F_RXHASH)) return 0; @@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev) * * Add the vlan id to the devices vlan id table */ -static void +static int vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct vxgedev *vdev = netdev_priv(dev); @@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) vxge_hw_vpath_vid_add(vpath->handle, vid); } set_bit(vid, vdev->active_vlans); + return 0; } /** @@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) * * Remove the vlan id from the device's vlan id table */ -static void +static int vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct vxgedev *vdev = netdev_priv(dev); @@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); clear_bit(vid, vdev->active_vlans); + return 0; } static const struct net_device_ops vxge_netdev_ops = { diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index f1bfb8f8fcf..b75a0497d58 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = { }, }; -static int __init w90p910_ether_init(void) -{ - return platform_driver_register(&w90p910_ether_driver); -} - -static void __exit w90p910_ether_exit(void) -{ - platform_driver_unregister(&w90p910_ether_driver); -} - -module_init(w90p910_ether_init); -module_exit(w90p910_ether_exit); +module_platform_driver(w90p910_ether_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910 MAC driver!"); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 1c61d36e657..4c4e7f45838 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -65,7 +65,8 @@ #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/prefetch.h> -#include <linux/io.h> +#include <linux/u64_stats_sync.h> +#include <linux/io.h> #include <asm/irq.h> #include <asm/system.h> @@ -736,6 +737,16 @@ struct nv_skb_map { * - tx setup is lockless: 
it relies on netif_tx_lock. Actual submission * needs netdev_priv(dev)->lock :-( * - set_multicast_list: preparation lockless, relies on netif_tx_lock. + * + * Hardware stats updates are protected by hwstats_lock: + * - updated by nv_do_stats_poll (timer). This is meant to avoid + * integer wraparound in the NIC stats registers, at low frequency + * (0.1 Hz) + * - updated by nv_get_ethtool_stats + nv_get_stats64 + * + * Software stats are accessed only through 64b synchronization points + * and are not subject to other synchronization techniques (single + * update thread on the TX or RX paths). */ /* in dev: base, irq */ @@ -745,9 +756,10 @@ struct fe_priv { struct net_device *dev; struct napi_struct napi; - /* General data: - * Locking: spin_lock(&np->lock); */ + /* hardware stats are updated in syscall and timer */ + spinlock_t hwstats_lock; struct nv_ethtool_stats estats; + int in_shutdown; u32 linkspeed; int duplex; @@ -798,6 +810,13 @@ struct fe_priv { u32 nic_poll_irq; int rx_ring_size; + /* RX software stats */ + struct u64_stats_sync swstats_rx_syncp; + u64 stat_rx_packets; + u64 stat_rx_bytes; /* not always available in HW */ + u64 stat_rx_missed_errors; + u64 stat_rx_dropped; + /* media detection workaround. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); */ @@ -820,6 +839,12 @@ struct fe_priv { struct nv_skb_map *tx_end_flip; int tx_stop; + /* TX software stats */ + struct u64_stats_sync swstats_tx_syncp; + u64 stat_tx_packets; /* not always available in HW */ + u64 stat_tx_bytes; + u64 stat_tx_dropped; + /* msi/msi-x fields */ u32 msi_flags; struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; @@ -892,6 +917,11 @@ enum { static int dma_64bit = NV_DMA_64BIT_ENABLED; /* + * Debug output control for tx_timeout + */ +static bool debug_tx_timeout = false; + +/* * Crossover Detection * Realtek 8201 phy + some OEM boards do not work properly. */ @@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev) pci_push(base); } -static void nv_get_hw_stats(struct net_device *dev) +/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */ +static void nv_update_stats(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); + /* If it happens that this is run in top-half context, then + * replace the spin_lock of hwstats_lock with + * spin_lock_irqsave() in calling functions. */ + WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half"); + assert_spin_locked(&np->hwstats_lock); + + /* query hardware */ np->estats.tx_bytes += readl(base + NvRegTxCnt); np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); @@ -1693,40 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev) } /* - * nv_get_stats: dev->get_stats function + * nv_get_stats64: dev->ndo_get_stats64 function * Get latest stats value from the nic. * Called with read_lock(&dev_base_lock) held for read - * only synchronized against unregister_netdevice. 
*/ -static struct net_device_stats *nv_get_stats(struct net_device *dev) +static struct rtnl_link_stats64* +nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) { struct fe_priv *np = netdev_priv(dev); + unsigned int syncp_start; + + /* + * Note: because HW stats are not always available and for + * consistency reasons, the following ifconfig stats are + * managed by software: rx_bytes, tx_bytes, rx_packets and + * tx_packets. The related hardware stats reported by ethtool + * should be equivalent to these ifconfig stats, with 4 + * additional bytes per packet (Ethernet FCS CRC), except for + * tx_packets when TSO kicks in. + */ + + /* software stats */ + do { + syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); + storage->rx_packets = np->stat_rx_packets; + storage->rx_bytes = np->stat_rx_bytes; + storage->rx_dropped = np->stat_rx_dropped; + storage->rx_missed_errors = np->stat_rx_missed_errors; + } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); + + do { + syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); + storage->tx_packets = np->stat_tx_packets; + storage->tx_bytes = np->stat_tx_bytes; + storage->tx_dropped = np->stat_tx_dropped; + } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); /* If the nic supports hw counters then retrieve latest values */ - if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { - nv_get_hw_stats(dev); + if (np->driver_data & DEV_HAS_STATISTICS_V123) { + spin_lock_bh(&np->hwstats_lock); - /* - * Note: because HW stats are not always available and - * for consistency reasons, the following ifconfig - * stats are managed by software: rx_bytes, tx_bytes, - * rx_packets and tx_packets. The related hardware - * stats reported by ethtool should be equivalent to - * these ifconfig stats, with 4 additional bytes per - * packet (Ethernet FCS CRC). 
- */ + nv_update_stats(dev); + + /* generic stats */ + storage->rx_errors = np->estats.rx_errors_total; + storage->tx_errors = np->estats.tx_errors_total; - /* copy to net_device stats */ - dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; - dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; - dev->stats.rx_crc_errors = np->estats.rx_crc_errors; - dev->stats.rx_over_errors = np->estats.rx_over_errors; - dev->stats.rx_fifo_errors = np->estats.rx_drop_frame; - dev->stats.rx_errors = np->estats.rx_errors_total; - dev->stats.tx_errors = np->estats.tx_errors_total; + /* meaningful only when NIC supports stats v3 */ + storage->multicast = np->estats.rx_multicast; + + /* detailed rx_errors */ + storage->rx_length_errors = np->estats.rx_length_error; + storage->rx_over_errors = np->estats.rx_over_errors; + storage->rx_crc_errors = np->estats.rx_crc_errors; + storage->rx_frame_errors = np->estats.rx_frame_align_error; + storage->rx_fifo_errors = np->estats.rx_drop_frame; + + /* detailed tx_errors */ + storage->tx_carrier_errors = np->estats.tx_carrier_errors; + storage->tx_fifo_errors = np->estats.tx_fifo_errors; + + spin_unlock_bh(&np->hwstats_lock); } - return &dev->stats; + return storage; } /* @@ -1759,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev) np->put_rx.orig = np->first_rx.orig; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) np->put_rx_ctx = np->first_rx_ctx; - } else + } else { + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_dropped++; + u64_stats_update_end(&np->swstats_rx_syncp); return 1; + } } return 0; } @@ -1791,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) np->put_rx.ex = np->first_rx.ex; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) np->put_rx_ctx = np->first_rx_ctx; - } else + } else { + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_dropped++; + u64_stats_update_end(&np->swstats_rx_syncp); return 1; + } } return 0; } @@ -1849,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev) np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; + netdev_reset_queue(np->dev); np->tx_pkts_in_progress = 0; np->tx_change_owner = NULL; np->tx_end_flip = NULL; @@ -1927,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev) np->tx_ring.ex[i].bufhigh = 0; np->tx_ring.ex[i].buflow = 0; } - if (nv_release_txskb(np, &np->tx_skb[i])) - dev->stats.tx_dropped++; + if (nv_release_txskb(np, &np->tx_skb[i])) { + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_dropped++; + u64_stats_update_end(&np->swstats_tx_syncp); + } np->tx_skb[i].dma = 0; np->tx_skb[i].dma_len = 0; np->tx_skb[i].dma_single = 0; @@ -2194,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* set tx flags */ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); + + netdev_sent_queue(np->dev, skb->len); + np->put_tx.orig = put_tx; spin_unlock_irqrestore(&np->lock, flags); @@ -2338,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, /* set tx flags */ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); + + netdev_sent_queue(np->dev, skb->len); + np->put_tx.ex = put_tx; spin_unlock_irqrestore(&np->lock, flags); @@ -2375,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit) u32 flags; int tx_work = 0; struct ring_desc *orig_get_tx = np->get_tx.orig; + unsigned int bytes_compl = 0; while ((np->get_tx.orig != 
np->put_tx.orig) && !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && @@ -2385,12 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit) if (np->desc_ver == DESC_VER_1) { if (flags & NV_TX_LASTPACKET) { if (flags & NV_TX_ERROR) { - if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) + if ((flags & NV_TX_RETRYERROR) + && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { - dev->stats.tx_packets++; - dev->stats.tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_packets++; + np->stat_tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_end(&np->swstats_tx_syncp); } + bytes_compl += np->get_tx_ctx->skb->len; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; tx_work++; @@ -2398,12 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit) } else { if (flags & NV_TX2_LASTPACKET) { if (flags & NV_TX2_ERROR) { - if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) + if ((flags & NV_TX2_RETRYERROR) + && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { - dev->stats.tx_packets++; - dev->stats.tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_packets++; + np->stat_tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_end(&np->swstats_tx_syncp); } + bytes_compl += np->get_tx_ctx->skb->len; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; tx_work++; @@ -2414,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit) if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) np->get_tx_ctx = np->first_tx_ctx; } + + netdev_completed_queue(np->dev, tx_work, bytes_compl); + if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { np->tx_stop = 0; netif_wake_queue(dev); @@ -2427,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) u32 flags; int tx_work = 0; struct ring_desc_ex *orig_get_tx = np->get_tx.ex; + unsigned long bytes_cleaned = 0; while ((np->get_tx.ex != np->put_tx.ex) && !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && @@ -2436,17 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) if (flags & NV_TX2_LASTPACKET) { if (flags & NV_TX2_ERROR) { - if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { + if ((flags & NV_TX2_RETRYERROR) + && !(flags & NV_TX2_RETRYCOUNT_MASK)) { if (np->driver_data & DEV_HAS_GEAR_MODE) nv_gear_backoff_reseed(dev); else nv_legacybackoff_reseed(dev); } } else { - dev->stats.tx_packets++; - dev->stats.tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_packets++; + np->stat_tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_end(&np->swstats_tx_syncp); } + bytes_cleaned += np->get_tx_ctx->skb->len; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; tx_work++; @@ -2454,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) if (np->tx_limit) nv_tx_flip_ownership(dev); } + if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) np->get_tx.ex = np->first_tx.ex; if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) np->get_tx_ctx = np->first_tx_ctx; } + + netdev_completed_queue(np->dev, tx_work, bytes_cleaned); + if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { np->tx_stop = 0; netif_wake_queue(dev); @@ -2477,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev) u32 status; union ring_type 
put_tx; int saved_tx_limit; - int i; if (np->msi_flags & NV_MSI_X_ENABLED) status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; else status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; - netdev_info(dev, "Got tx_timeout. irq: %08x\n", status); + netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status); - netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); - netdev_info(dev, "Dumping tx registers\n"); - for (i = 0; i <= np->register_size; i += 32) { - netdev_info(dev, - "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", - i, - readl(base + i + 0), readl(base + i + 4), - readl(base + i + 8), readl(base + i + 12), - readl(base + i + 16), readl(base + i + 20), - readl(base + i + 24), readl(base + i + 28)); - } - netdev_info(dev, "Dumping tx ring\n"); - for (i = 0; i < np->tx_ring_size; i += 4) { - if (!nv_optimized(np)) { - netdev_info(dev, - "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", - i, - le32_to_cpu(np->tx_ring.orig[i].buf), - le32_to_cpu(np->tx_ring.orig[i].flaglen), - le32_to_cpu(np->tx_ring.orig[i+1].buf), - le32_to_cpu(np->tx_ring.orig[i+1].flaglen), - le32_to_cpu(np->tx_ring.orig[i+2].buf), - le32_to_cpu(np->tx_ring.orig[i+2].flaglen), - le32_to_cpu(np->tx_ring.orig[i+3].buf), - le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); - } else { + if (unlikely(debug_tx_timeout)) { + int i; + + netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); + netdev_info(dev, "Dumping tx registers\n"); + for (i = 0; i <= np->register_size; i += 32) { netdev_info(dev, - "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", + "%3x: %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", i, - le32_to_cpu(np->tx_ring.ex[i].bufhigh), - le32_to_cpu(np->tx_ring.ex[i].buflow), - le32_to_cpu(np->tx_ring.ex[i].flaglen), - le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), - le32_to_cpu(np->tx_ring.ex[i+1].buflow), - le32_to_cpu(np->tx_ring.ex[i+1].flaglen), - le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), - le32_to_cpu(np->tx_ring.ex[i+2].buflow), - le32_to_cpu(np->tx_ring.ex[i+2].flaglen), - le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), - le32_to_cpu(np->tx_ring.ex[i+3].buflow), - le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); + readl(base + i + 0), readl(base + i + 4), + readl(base + i + 8), readl(base + i + 12), + readl(base + i + 16), readl(base + i + 20), + readl(base + i + 24), readl(base + i + 28)); + } + netdev_info(dev, "Dumping tx ring\n"); + for (i = 0; i < np->tx_ring_size; i += 4) { + if (!nv_optimized(np)) { + netdev_info(dev, + "%03x: %08x %08x // %08x %08x " + "// %08x %08x // %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.orig[i].buf), + le32_to_cpu(np->tx_ring.orig[i].flaglen), + le32_to_cpu(np->tx_ring.orig[i+1].buf), + le32_to_cpu(np->tx_ring.orig[i+1].flaglen), + le32_to_cpu(np->tx_ring.orig[i+2].buf), + le32_to_cpu(np->tx_ring.orig[i+2].flaglen), + le32_to_cpu(np->tx_ring.orig[i+3].buf), + le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); + } else { + netdev_info(dev, + "%03x: %08x %08x %08x " + "// %08x %08x %08x " + "// %08x %08x %08x " + "// %08x %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.ex[i].bufhigh), + le32_to_cpu(np->tx_ring.ex[i].buflow), + le32_to_cpu(np->tx_ring.ex[i].flaglen), + le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+1].buflow), + le32_to_cpu(np->tx_ring.ex[i+1].flaglen), + le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+2].buflow), + le32_to_cpu(np->tx_ring.ex[i+2].flaglen), + le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+3].buflow), + 
le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); + } } } @@ -2649,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit) } /* the rest are hard errors */ else { - if (flags & NV_RX_MISSEDFRAME) - dev->stats.rx_missed_errors++; + if (flags & NV_RX_MISSEDFRAME) { + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_missed_errors++; + u64_stats_update_end(&np->swstats_rx_syncp); + } dev_kfree_skb(skb); goto next_pkt; } @@ -2693,8 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit) skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&np->napi, skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_packets++; + np->stat_rx_bytes += len; + u64_stats_update_end(&np->swstats_rx_syncp); next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) np->get_rx.orig = np->first_rx.orig; @@ -2777,8 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) __vlan_hwaccel_put_tag(skb, vid); } napi_gro_receive(&np->napi, skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_packets++; + np->stat_rx_bytes += len; + u64_stats_update_end(&np->swstats_rx_syncp); } else { dev_kfree_skb(skb); } @@ -3021,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags) } } +static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 phyreg, txreg; + int mii_status; + + np->linkspeed = NVREG_LINKSPEED_FORCE|speed; + np->duplex = duplex; + + /* see if gigabit phy */ + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) { + np->gigabit = PHY_GIGABIT; + phyreg = readl(base + NvRegSlotTime); + phyreg &= ~(0x3FF00); + if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) + phyreg |= NVREG_SLOTTIME_10_100_FULL; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) + phyreg |= NVREG_SLOTTIME_10_100_FULL; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) + phyreg |= NVREG_SLOTTIME_1000_FULL; + writel(phyreg, base + NvRegSlotTime); + } + + phyreg = readl(base + NvRegPhyInterface); + phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); + if (np->duplex == 0) + phyreg |= PHY_HALF; + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) + phyreg |= PHY_100; + else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == + NVREG_LINKSPEED_1000) + phyreg |= PHY_1000; + writel(phyreg, base + NvRegPhyInterface); + + if (phyreg & PHY_RGMII) { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == + NVREG_LINKSPEED_1000) + txreg = NVREG_TX_DEFERRAL_RGMII_1000; + else + txreg = NVREG_TX_DEFERRAL_RGMII_10_100; + } else { + txreg = NVREG_TX_DEFERRAL_DEFAULT; + } + writel(txreg, base + NvRegTxDeferral); + + if (np->desc_ver == DESC_VER_1) { + txreg = NVREG_TX_WM_DESC1_DEFAULT; + } else { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == + NVREG_LINKSPEED_1000) + txreg = NVREG_TX_WM_DESC2_3_1000; + else + txreg = NVREG_TX_WM_DESC2_3_DEFAULT; + } + writel(txreg, base + NvRegTxWatermark); + + writel(NVREG_MISC1_FORCE | (np->duplex ? 
0 : NVREG_MISC1_HD), + base + NvRegMisc1); + pci_push(base); + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + + return; +} + /** * nv_update_linkspeed: Setup the MAC according to the link partner * @dev: Network device to be configured @@ -3042,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev) int newls = np->linkspeed; int newdup = np->duplex; int mii_status; + u32 bmcr; int retval = 0; u32 control_1000, status_1000, phyreg, pause_flags, txreg; u32 txrxFlags = 0; u32 phy_exp; + /* If device loopback is enabled, set carrier on and enable max link + * speed. + */ + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (bmcr & BMCR_LOOPBACK) { + if (netif_running(dev)) { + nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1); + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } + return 1; + } + /* BMSR_LSTATUS is latched, read it twice: * we want the current value. */ @@ -3729,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) writel(0, base + NvRegMSIXMap0); writel(0, base + NvRegMSIXMap1); } + netdev_info(dev, "MSI-X enabled\n"); } } if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { @@ -3750,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) writel(0, base + NvRegMSIMap1); /* enable msi vector 0 */ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + netdev_info(dev, "MSI enabled\n"); } } if (ret != 0) { @@ -3904,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev) #endif static void nv_do_stats_poll(unsigned long data) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) { struct net_device *dev = (struct net_device *) data; struct fe_priv *np = netdev_priv(dev); - nv_get_hw_stats(dev); + /* If lock is currently taken, the stats are being refreshed + * and hence fresh enough */ + if (spin_trylock(&np->hwstats_lock)) { + nv_update_stats(dev); + spin_unlock(&np->hwstats_lock); + } if (!np->in_shutdown) mod_timer(&np->stats_poll, @@ -3918,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data) static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct fe_priv *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, FORCEDETH_VERSION); - strcpy(info->bus_info, pci_name(np->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) @@ -4473,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* return 0; } -static u32 nv_fix_features(struct net_device *dev, u32 features) +static int nv_set_loopback(struct net_device *dev, netdev_features_t features) +{ + struct fe_priv *np = netdev_priv(dev); + unsigned long flags; + u32 miicontrol; + int err, retval = 0; + + spin_lock_irqsave(&np->lock, flags); + miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (features & NETIF_F_LOOPBACK) { + if (miicontrol & BMCR_LOOPBACK) { + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, "Loopback already enabled\n"); + return 0; + } + nv_disable_irq(dev); + /* Turn on loopback mode */ + miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; + err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); + if (err) { + retval = PHY_ERROR; + spin_unlock_irqrestore(&np->lock, flags); + phy_init(dev); + } 
else { + if (netif_running(dev)) { + /* Force 1000 Mbps full-duplex */ + nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, + 1); + /* Force link up */ + netif_carrier_on(dev); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, + "Internal PHY loopback mode enabled.\n"); + } + } else { + if (!(miicontrol & BMCR_LOOPBACK)) { + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, "Loopback already disabled\n"); + return 0; + } + nv_disable_irq(dev); + /* Turn off loopback */ + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, "Internal PHY loopback mode disabled.\n"); + phy_init(dev); + } + msleep(500); + spin_lock_irqsave(&np->lock, flags); + nv_enable_irq(dev); + spin_unlock_irqrestore(&np->lock, flags); + + return retval; +} + +static netdev_features_t nv_fix_features(struct net_device *dev, + netdev_features_t features) { /* vlan is dependent on rx checksum offload */ if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) @@ -4482,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features) return features; } -static void nv_vlan_mode(struct net_device *dev, u32 features) +static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) { struct fe_priv *np = get_nvpriv(dev); @@ -4503,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features) spin_unlock_irq(&np->lock); } -static int nv_set_features(struct net_device *dev, u32 features) +static int nv_set_features(struct net_device *dev, netdev_features_t features) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - u32 changed = dev->features ^ features; + netdev_features_t changed = dev->features ^ features; + int retval; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) { + retval = nv_set_loopback(dev, features); + if (retval != 0) + return retval; + } if (changed & NETIF_F_RXCSUM) { spin_lock_irq(&np->lock); @@ -4553,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset) } } -static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) +static void nv_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *buffer) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) { struct fe_priv *np = netdev_priv(dev); - /* update stats */ - nv_get_hw_stats(dev); - - memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); + spin_lock_bh(&np->hwstats_lock); + nv_update_stats(dev); + memcpy(buffer, &np->estats, + nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); + spin_unlock_bh(&np->hwstats_lock); } static int nv_link_test(struct net_device *dev) @@ -5142,6 +5424,12 @@ static int nv_open(struct net_device *dev) spin_unlock_irq(&np->lock); + /* If the loopback feature was set while the device was down, make sure + * that it's set correctly now. 
+ */ + if (dev->features & NETIF_F_LOOPBACK) + nv_set_loopback(dev, dev->features); + return 0; out_drain: nv_drain_rxtx(dev); @@ -5198,7 +5486,7 @@ static int nv_close(struct net_device *dev) static const struct net_device_ops nv_netdev_ops = { .ndo_open = nv_open, .ndo_stop = nv_close, - .ndo_get_stats = nv_get_stats, + .ndo_get_stats64 = nv_get_stats64, .ndo_start_xmit = nv_start_xmit, .ndo_tx_timeout = nv_tx_timeout, .ndo_change_mtu = nv_change_mtu, @@ -5215,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = { static const struct net_device_ops nv_netdev_ops_optimized = { .ndo_open = nv_open, .ndo_stop = nv_close, - .ndo_get_stats = nv_get_stats, + .ndo_get_stats64 = nv_get_stats64, .ndo_start_xmit = nv_start_xmit_optimized, .ndo_tx_timeout = nv_tx_timeout, .ndo_change_mtu = nv_change_mtu, @@ -5254,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->dev = dev; np->pci_dev = pci_dev; spin_lock_init(&np->lock); + spin_lock_init(&np->hwstats_lock); SET_NETDEV_DEV(dev, &pci_dev->dev); init_timer(&np->oom_kick); @@ -5262,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i init_timer(&np->nic_poll); np->nic_poll.data = (unsigned long) dev; np->nic_poll.function = nv_do_nic_poll; /* timer handler */ - init_timer(&np->stats_poll); + init_timer_deferrable(&np->stats_poll); np->stats_poll.data = (unsigned long) dev; np->stats_poll.function = nv_do_stats_poll; /* timer handler */ @@ -5346,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i dev->features |= dev->hw_features; + /* Add loopback capability to the device. */ + dev->hw_features |= NETIF_F_LOOPBACK; + np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || @@ -5621,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); - dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", + dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", dev->features & NETIF_F_HIGHDMA ? "highdma " : "", dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? "csum " : "", dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? "vlan " : "", + dev->features & (NETIF_F_LOOPBACK) ? + "loopback " : "", id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", id->driver_data & DEV_NEED_TIMERIRQ ? 
"timirq " : "", @@ -6000,6 +6294,9 @@ module_param(phy_cross, int, 0); MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); module_param(phy_power_down, int, 0); MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); +module_param(debug_tx_timeout, bool, 0); +MODULE_PARM_DESC(debug_tx_timeout, + "Dump tx related registers and ring when tx_timeout happens"); MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 8c8027176be..ac4e72d529e 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev, { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - strcpy(drvinfo->driver, KBUILD_MODNAME); - strcpy(drvinfo->version, pch_driver_version); - strcpy(drvinfo->fw_version, "N/A"); - strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->regdump_len = pch_gbe_get_regs_len(netdev); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 48406ca382f..964e9c0948b 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2109,10 +2109,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) * Returns * 0: HW state updated successfully */ -static int pch_gbe_set_features(struct net_device *netdev, u32 features) +static int pch_gbe_set_features(struct net_device *netdev, + netdev_features_t features) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - u32 changed = features ^ netdev->features; + netdev_features_t changed = features ^ netdev->features; if (!(changed & NETIF_F_RXCSUM)) return 0; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index e09ea83b8c4..8a371985319 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) u32 fw_minor = 0; u32 fw_build = 0; - strncpy(drvinfo->driver, netxen_nic_driver_name, 32); - strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); + strlcpy(drvinfo->driver, netxen_nic_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, + sizeof(drvinfo->version)); fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); - sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", fw_major, fw_minor, fw_build); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; 
drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); } diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 8cf3173ba48..7dd9a4b107e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev) adapter->set_multi(dev); } -static u32 netxen_fix_features(struct net_device *dev, u32 features) +static netdev_features_t netxen_fix_features(struct net_device *dev, + netdev_features_t features) { if (!(features & NETIF_F_RXCSUM)) { netdev_info(dev, "disabling LRO as RXCSUM is off\n"); @@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features) return features; } -static int netxen_set_features(struct net_device *dev, u32 features) +static int netxen_set_features(struct net_device *dev, + netdev_features_t features) { struct netxen_adapter *adapter = netdev_priv(dev); int hw_lro; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index a4bdff438a5..7931531c3a4 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { struct ql3_adapter *qdev = netdev_priv(ndev); - strncpy(drvinfo->driver, ql3xxx_driver_name, 32); - strncpy(drvinfo->version, ql3xxx_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); + strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ql3xxx_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), + sizeof(drvinfo->bus_info)); drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7ed53dbb864..60976fc4ccc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); -u32 qlcnic_fix_features(struct net_device *netdev, u32 features); -int qlcnic_set_features(struct net_device *netdev, u32 features); +netdev_features_t qlcnic_fix_features(struct net_device *netdev, + netdev_features_t features); +int qlcnic_set_features(struct net_device *netdev, netdev_features_t features); int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 8aa1c6e8667..cc228cf3d84 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); - sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); - - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 
32); - strlcpy(drvinfo->driver, qlcnic_driver_name, 32); - strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", fw_major, fw_minor, fw_build); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, + sizeof(drvinfo->version)); } static int diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index bcb81e47543..b528e52a8ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) } -u32 qlcnic_fix_features(struct net_device *netdev, u32 features) +netdev_features_t qlcnic_fix_features(struct net_device *netdev, + netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { - u32 changed = features ^ netdev->features; + netdev_features_t changed = features ^ netdev->features; features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); } @@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features) } -int qlcnic_set_features(struct net_device *netdev, u32 features) +int qlcnic_set_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - u32 changed = netdev->features ^ features; + netdev_features_t changed = netdev->features ^ features; int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0; if (!(changed & NETIF_F_LRO)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 0bd163828e3..69b8e4ef14d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); static int qlcnicvf_start_firmware(struct qlcnic_adapter *); static void qlcnic_set_netdev_features(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); -static void qlcnic_vlan_rx_add(struct net_device *, u16); -static void qlcnic_vlan_rx_del(struct net_device *, u16); +static int qlcnic_vlan_rx_add(struct net_device *, u16); +static int qlcnic_vlan_rx_del(struct net_device *, u16); /* PCI Device ID Table */ #define ENTRY(device) \ @@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, adapter->pvid = 0; } -static void +static int qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); set_bit(vid, adapter->vlans); + return 0; } -static void +static int qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); clear_bit(vid, adapter->vlans); + return 0; } static void @@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; - unsigned long features, vlan_features; + netdev_features_t features, vlan_features; features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 9b67bfea035..8e2c2a74f3a 100644 --- 
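These ethtool_drvinfo conversions all trade strcpy/strncpy/sprintf with a hard-coded 32 for strlcpy/snprintf bounded by sizeof() of the destination field: strcpy and sprintf do not bound the copy at all, and strncpy does not NUL-terminate when it truncates, while strlcpy does both. A small userspace sketch of the semantics; strlcpy_sketch is a local stand-in because glibc does not provide strlcpy, and the 32-byte buffer mirrors the size of ethtool_drvinfo's fixed fields.

#include <stdio.h>
#include <string.h>

/* Local stand-in for the kernel's strlcpy: copy at most size-1 bytes,
 * always NUL-terminate, return the full length of src so callers can
 * detect truncation. */
static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char bus_info[32];	/* same size as ethtool_drvinfo::bus_info */
	const char *name = "this-bus-name-is-much-longer-than-thirty-two-bytes";

	if (strlcpy_sketch(bus_info, name, sizeof(bus_info)) >= sizeof(bus_info))
		printf("truncated: ");
	printf("%s\n", bus_info);	/* always NUL-terminated */
	return 0;
}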
a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { struct ql_adapter *qdev = netdev_priv(ndev); - strncpy(drvinfo->driver, qlge_driver_name, 32); - strncpy(drvinfo->version, qlge_driver_version, 32); - snprintf(drvinfo->fw_version, 32, "v%d.%d.%d", + strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, qlge_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "v%d.%d.%d", (qdev->fw_rev_id & 0x00ff0000) >> 16, (qdev->fw_rev_id & 0x0000ff00) >> 8, (qdev->fw_rev_id & 0x000000ff)); - strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); + strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), + sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index c92afcd912e..b5489873728 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) return work_done; } -static void qlge_vlan_mode(struct net_device *ndev, u32 features) +static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) { struct ql_adapter *qdev = netdev_priv(ndev); @@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features) } } -static u32 qlge_fix_features(struct net_device *ndev, u32 features) +static netdev_features_t qlge_fix_features(struct net_device *ndev, + netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel @@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features) return features; } -static int qlge_set_features(struct net_device *ndev, u32 features) +static int qlge_set_features(struct net_device *ndev, + netdev_features_t features) { - u32 changed = ndev->features ^ features; + netdev_features_t changed = ndev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) qlge_vlan_mode(ndev, features); @@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features) return 0; } -static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) +static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) { u32 enable_bit = MAC_ADDR_E; + int err; - if (ql_set_mac_addr_reg - (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { + err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, + MAC_ADDR_TYPE_VLAN, vid); + if (err) netif_err(qdev, ifup, qdev->ndev, "Failed to init vlan address.\n"); - } + return err; } -static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; + int err; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) - return; + return status; - __qlge_vlan_rx_add_vid(qdev, vid); + err = __qlge_vlan_rx_add_vid(qdev, vid); set_bit(vid, qdev->active_vlans); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + + return err; } -static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) +static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) { u32 enable_bit = 0; + int err; - if (ql_set_mac_addr_reg - (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, + MAC_ADDR_TYPE_VLAN, vid); + if (err) netif_err(qdev, ifup, qdev->ndev, "Failed to clear vlan address.\n"); - } + return err; } -static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; + int err; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) - return; + return status; - __qlge_vlan_rx_kill_vid(qdev, vid); + err = __qlge_vlan_rx_kill_vid(qdev, vid); clear_bit(vid, qdev->active_vlans); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + + return err; } static void qlge_restore_vlan(struct ql_adapter *qdev) diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 4bf68cfef39..87aa4393507 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -52,12 +52,6 @@ #define DRV_VERSION "0.28" #define DRV_RELDATE "07Oct2011" -/* PHY CHIP Address */ -#define PHY1_ADDR 1 /* For MAC1 */ -#define PHY2_ADDR 3 /* For MAC2 */ -#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */ -#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */ - /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (6000 * HZ / 1000) @@ -69,8 +63,11 @@ /* MAC registers */ #define MCR0 0x00 /* Control register 0 */ +#define MCR0_RCVEN 0x0002 /* Receive enable */ #define MCR0_PROMISC 0x0020 /* Promiscuous mode */ #define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ +#define MCR0_XMTEN 0x1000 /* Transmission enable */ +#define MCR0_FD 0x8000 /* Full/Half duplex */ #define MCR1 0x04 /* Control register 1 */ #define MAC_RST 0x0001 /* Reset the MAC */ #define MBCR 0x08 /* Bus control */ @@ -129,6 +126,7 @@ #define PHY_CC 0x88 /* PHY status change configuration register */ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ +#define MAC_SM_RST 0x0002 /* MAC status machine reset */ #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -154,9 +152,6 @@ #define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */ #define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */ -/* PHY settings */ -#define ICPLUS_PHY_ID 0x0243 - MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>," "Florian Fainelli <florian@openwrt.org>"); @@ -178,7 +173,7 @@ struct r6040_descriptor { struct r6040_descriptor *vndescp; /* 14-17 */ struct sk_buff *skb_ptr; /* 18-1B */ u32 rev2; /* 1C-1F */ -} __attribute__((aligned(32))); +} __aligned(32); struct r6040_private { spinlock_t lock; /* driver lock */ @@ -191,7 +186,7 @@ struct r6040_private { struct r6040_descriptor *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; - u16 tx_free_desc, phy_addr; + u16 tx_free_desc; u16 mcr0, mcr1; struct net_device *dev; struct mii_bus *mii_bus; @@ -206,8 +201,6 @@ static char version[] __devinitdata = DRV_NAME ": RDC R6040 NAPI net driver," "version "DRV_VERSION " (" DRV_RELDATE ")"; -static int phy_table[] = { PHY1_ADDR, PHY2_ADDR }; - /* Read a word data from PHY Chip */ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) { @@ -379,11 +372,11 @@ static void r6040_init_mac_regs(struct net_device *dev) iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); - if (cmd & 0x1) + if (cmd & MAC_RST) break; } /* Reset internal state machine */ - iowrite16(2, ioaddr + MAC_SM); + 
iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); @@ -409,7 +402,7 @@ static void r6040_init_mac_regs(struct net_device *dev) iowrite16(INT_MASK, ioaddr + MIER); /* Enable TX and RX */ - iowrite16(lp->mcr0 | 0x0002, ioaddr); + iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr); /* Let TX poll the descriptors * we may got called by r6040_tx_timeout which has left @@ -461,7 +454,7 @@ static void r6040_down(struct net_device *dev) iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */ while (limit--) { cmd = ioread16(ioaddr + MCR1); - if (cmd & 0x1) + if (cmd & MAC_RST) break; } @@ -742,9 +735,10 @@ static void r6040_mac_address(struct net_device *dev) void __iomem *ioaddr = lp->base; u16 *adrp; - /* MAC operation register */ - iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */ - iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */ + /* Reset MAC */ + iowrite16(MAC_RST, ioaddr + MCR1); + /* Reset internal state machine */ + iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); @@ -1013,7 +1007,7 @@ static void r6040_adjust_link(struct net_device *dev) /* reflect duplex change */ if (phydev->link && (lp->old_duplex != phydev->duplex)) { - lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0); + lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0); iowrite16(lp->mcr0, ioaddr); status_changed = 1; @@ -1166,8 +1160,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, lp->dev = dev; /* Init RDC private data */ - lp->mcr0 = 0x1002; - lp->phy_addr = phy_table[card_idx]; + lp->mcr0 = MCR0_XMTEN | MCR0; /* The RDC-specific entries in the device structure. */ dev->netdev_ops = &r6040_netdev_ops; @@ -1188,7 +1181,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, lp->mii_bus->write = r6040_mdiobus_write; lp->mii_bus->reset = r6040_mdiobus_reset; lp->mii_bus->name = "r6040_eth_mii"; - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx); + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + dev_name(&pdev->dev), card_idx); lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!lp->mii_bus->irq) { dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index ee5da9293ce..cc6b391479c 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -859,7 +859,6 @@ static void __cp_set_rx_mode (struct net_device *dev) struct cp_private *cp = netdev_priv(dev); u32 mc_filter[2]; /* Multicast hash filter */ int rx_mode; - u32 tmp; /* Note: do not reorder, GCC is clever about common statements. */ if (dev->flags & IFF_PROMISC) { @@ -886,11 +885,9 @@ static void __cp_set_rx_mode (struct net_device *dev) } /* We can safely update without stopping the chip. 
*/ - tmp = cp_rx_config | rx_mode; - if (cp->rx_config != tmp) { - cpw32_f (RxConfig, tmp); - cp->rx_config = tmp; - } + cp->rx_config = cp_rx_config | rx_mode; + cpw32_f(RxConfig, cp->rx_config); + cpw32_f (MAR0 + 0, mc_filter[0]); cpw32_f (MAR0 + 4, mc_filter[1]); } @@ -1319,9 +1316,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info { struct cp_private *cp = netdev_priv(dev); - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - strcpy (info->bus_info, pci_name(cp->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); } static void cp_get_ringparam(struct net_device *dev, @@ -1392,7 +1389,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value) cp->msg_enable = value; } -static int cp_set_features(struct net_device *dev, u32 features) +static int cp_set_features(struct net_device *dev, netdev_features_t features) { struct cp_private *cp = netdev_priv(dev); unsigned long flags; @@ -1589,7 +1586,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p) No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. */ -#define eeprom_delay() readl(ee_addr) +#define eeprom_delay() readb(ee_addr) /* The EEPROM commands include the alway-set leading bit. */ #define EE_EXTEND_CMD (4) diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 4d6b254fc6c..a8779bedb3d 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev) No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. */ -#define eeprom_delay() (void)RTL_R32(Cfg9346) +#define eeprom_delay() (void)RTL_R8(Cfg9346) /* The EEPROM commands include the alway-set leading bit. */ #define EE_WRITE_CMD (5) @@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct rtl8139_private *tp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(tp->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); info->regdump_len = tp->regs_len; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c8f47f17186..7a0c800b50a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -69,9 +69,6 @@ The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; -/* MAC address length */ -#define MAC_ADDR_LEN 6 - #define MAX_READ_REQUEST_SHIFT 12 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ @@ -1406,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev, struct rtl8169_private *tp = netdev_priv(dev); struct rtl_fw *rtl_fw = tp->rtl_fw; - strcpy(info->driver, MODULENAME); - strcpy(info->version, RTL8169_VERSION); - strcpy(info->bus_info, pci_name(tp->pci_dev)); + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); - strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" : - rtl_fw->version); + if (!IS_ERR_OR_NULL(rtl_fw)) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); } static int rtl8169_get_regs_len(struct net_device *dev) @@ -1555,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return ret; } -static u32 rtl8169_fix_features(struct net_device *dev, u32 features) +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1569,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features) return features; } -static int rtl8169_set_features(struct net_device *dev, u32 features) +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; @@ -4101,7 +4101,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&tp->lock); /* Get MAC address */ - for (i = 0; i < MAC_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 9b230740c6a..fc9bda9bc36 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1369,13 +1369,13 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) } } -static struct ethtool_ops sh_eth_ethtool_ops = { +static const struct ethtool_ops sh_eth_ethtool_ops = { .get_settings = sh_eth_get_settings, .set_settings = sh_eth_set_settings, - .nway_reset = sh_eth_nway_reset, + .nway_reset = sh_eth_nway_reset, .get_msglevel = sh_eth_get_msglevel, .set_msglevel = sh_eth_set_msglevel, - .get_link = ethtool_op_get_link, + .get_link = ethtool_op_get_link, .get_strings = sh_eth_get_strings, .get_ethtool_stats = sh_eth_get_ethtool_stats, .get_sset_count = sh_eth_get_sset_count, @@ -1957,18 +1957,7 @@ static struct platform_driver sh_eth_driver = { }, }; -static int __init sh_eth_init(void) -{ - return platform_driver_register(&sh_eth_driver); -} - -static void __exit sh_eth_cleanup(void) -{ - platform_driver_unregister(&sh_eth_driver); -} - -module_init(sh_eth_init); -module_exit(sh_eth_cleanup); +module_platform_driver(sh_eth_driver); MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index c3673f151a4..f955a19eb22 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = { } }; -static int __init sgiseeq_module_init(void) -{ - if (platform_driver_register(&sgiseeq_driver)) { - printk(KERN_ERR 
"Driver registration failed\n"); - return -ENODEV; - } - - return 0; -} - -static void __exit sgiseeq_module_exit(void) -{ - platform_driver_unregister(&sgiseeq_driver); -} - -module_init(sgiseeq_module_init); -module_exit(sgiseeq_module_exit); +module_platform_driver(sgiseeq_driver); MODULE_DESCRIPTION("SGI Seeq 8003 driver"); MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>"); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index d5731f1fe6d..e43702f33b6 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx) if (efx->n_channels > 1) get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) - efx->rx_indir_table[i] = i % efx->n_rx_channels; + efx->rx_indir_table[i] = + ethtool_rxfh_indir_default(i, efx->n_rx_channels); efx_set_channels(efx); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); @@ -1900,7 +1901,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) /* Otherwise efx_start_port() will do this */ } -static int efx_set_features(struct net_device *net_dev, u32 data) +static int efx_set_features(struct net_device *net_dev, netdev_features_t data) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000B), .driver_data = (unsigned long) &falcon_b0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID), + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ .driver_data = (unsigned long) &siena_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID), + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ .driver_data = (unsigned long) &siena_a0_nic_type}, {0} /* end of list */ }; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 4764793ed23..a3541ac6ea0 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -14,10 +14,6 @@ #include "net_driver.h" #include "filter.h" -/* PCI IDs */ -#define BETHPAGE_A_P_DEVID 0x0803 -#define SIENA_A_P_DEVID 0x0813 - /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ #define EFX_MEM_BAR 2 @@ -65,13 +61,23 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); extern int efx_probe_filters(struct efx_nic *efx); extern void efx_restore_filters(struct efx_nic *efx); extern void efx_remove_filters(struct efx_nic *efx); -extern int efx_filter_insert_filter(struct efx_nic *efx, +extern s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace); -extern int efx_filter_remove_filter(struct efx_nic *efx, - struct efx_filter_spec *spec); +extern int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +extern int efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); extern void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority); +extern u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx); +extern s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); #ifdef CONFIG_RFS_ACCEL extern int efx_filter_rfs(struct net_device *net_dev, const 
struct sk_buff *skb, u16 rxq_index, u32 flow_id); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index f3cd96dfa39..29b2ebfef19 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -818,9 +818,58 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) return efx_reset(efx, rc); } +static int efx_ethtool_get_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct efx_filter_spec spec; + u16 vid; + u8 proto; + int rc; + + rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, + rule->location, &spec); + if (rc) + return rc; + + if (spec.dmaq_id == 0xfff) + rule->ring_cookie = RX_CLS_FLOW_DISC; + else + rule->ring_cookie = spec.dmaq_id; + + rc = efx_filter_get_eth_local(&spec, &vid, + rule->h_u.ether_spec.h_dest); + if (rc == 0) { + rule->flow_type = ETHER_FLOW; + memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN); + if (vid != EFX_FILTER_VID_UNSPEC) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = htons(vid); + rule->m_ext.vlan_tci = htons(0xfff); + } + return 0; + } + + rc = efx_filter_get_ipv4_local(&spec, &proto, + &ip_entry->ip4dst, &ip_entry->pdst); + if (rc != 0) { + rc = efx_filter_get_ipv4_full( + &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, + &ip_entry->ip4dst, &ip_entry->pdst); + EFX_WARN_ON_PARANOID(rc); + ip_mask->ip4src = ~0; + ip_mask->psrc = ~0; + } + rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; + ip_mask->ip4dst = ~0; + ip_mask->pdst = ~0; + return rc; +} + static int efx_ethtool_get_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info, u32 *rules __always_unused) + struct ethtool_rxnfc *info, u32 *rule_locs) { struct efx_nic *efx = netdev_priv(net_dev); @@ -862,42 +911,80 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, return 0; } + case ETHTOOL_GRXCLSRLCNT: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->data |= RX_CLS_LOC_SPECIAL; + info->rule_cnt = + efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); + return 0; + + case ETHTOOL_GRXCLSRULE: + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + return efx_ethtool_get_class_rule(efx, &info->fs); + + case ETHTOOL_GRXCLSRLALL: { + s32 rc; + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, + rule_locs, info->rule_cnt); + if (rc < 0) + return rc; + info->rule_cnt = rc; + return 0; + } + default: return -EOPNOTSUPP; } } -static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, - struct ethtool_rx_ntuple *ntuple) +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule) { - struct efx_nic *efx = netdev_priv(net_dev); - struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec; - struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; - struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; - struct efx_filter_spec filter; + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; int rc; - /* Range-check action */ - if (ntuple->fs.action 
< ETHTOOL_RXNTUPLE_ACTION_CLEAR || - ntuple->fs.action >= (s32)efx->n_rx_channels) + /* Check that user wants us to choose the location */ + if (rule->location != RX_CLS_LOC_ANY && + rule->location != RX_CLS_LOC_FIRST && + rule->location != RX_CLS_LOC_LAST) + return -EINVAL; + + /* Range-check ring_cookie */ + if (rule->ring_cookie >= efx->n_rx_channels && + rule->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; - if (~ntuple->fs.data_mask) + /* Check for unsupported extensions */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_etype | rule->m_ext.data[0] | + rule->m_ext.data[1])) return -EINVAL; - efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0, - (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ? - 0xfff : ntuple->fs.action); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, + (rule->location == RX_CLS_LOC_FIRST) ? + EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0, + (rule->ring_cookie == RX_CLS_FLOW_DISC) ? + 0xfff : rule->ring_cookie); - switch (ntuple->fs.flow_type) { + switch (rule->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: { - u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ? + u8 proto = (rule->flow_type == TCP_V4_FLOW ? IPPROTO_TCP : IPPROTO_UDP); /* Must match all of destination, */ - if (ip_mask->ip4dst | ip_mask->pdst) + if ((__force u32)~ip_mask->ip4dst | + (__force u16)~ip_mask->pdst) return -EINVAL; /* all or none of source, */ if ((ip_mask->ip4src | ip_mask->psrc) && @@ -905,17 +992,17 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, (__force u16)~ip_mask->psrc)) return -EINVAL; /* and nothing else */ - if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) + if (ip_mask->tos | rule->m_ext.vlan_tci) return -EINVAL; - if (!ip_mask->ip4src) - rc = efx_filter_set_ipv4_full(&filter, proto, + if (ip_mask->ip4src) + rc = efx_filter_set_ipv4_full(&spec, proto, ip_entry->ip4dst, ip_entry->pdst, ip_entry->ip4src, ip_entry->psrc); else - rc = efx_filter_set_ipv4_local(&filter, proto, + rc = efx_filter_set_ipv4_local(&spec, proto, ip_entry->ip4dst, ip_entry->pdst); if (rc) @@ -923,23 +1010,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, break; } - case ETHER_FLOW: - /* Must match all of destination, */ - if (!is_zero_ether_addr(mac_mask->h_dest)) + case ETHER_FLOW | FLOW_EXT: + /* Must match all or none of VID */ + if (rule->m_ext.vlan_tci != htons(0xfff) && + rule->m_ext.vlan_tci != 0) return -EINVAL; - /* all or none of VID, */ - if (ntuple->fs.vlan_tag_mask != 0xf000 && - ntuple->fs.vlan_tag_mask != 0xffff) + case ETHER_FLOW: + /* Must match all of destination */ + if (!is_broadcast_ether_addr(mac_mask->h_dest)) return -EINVAL; /* and nothing else */ - if (!is_broadcast_ether_addr(mac_mask->h_source) || - mac_mask->h_proto != htons(0xffff)) + if (!is_zero_ether_addr(mac_mask->h_source) || + mac_mask->h_proto) return -EINVAL; rc = efx_filter_set_eth_local( - &filter, - (ntuple->fs.vlan_tag_mask == 0xf000) ? - ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC, + &spec, + (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ? + ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, mac_entry->h_dest); if (rc) return rc; @@ -949,47 +1037,57 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, return -EINVAL; } - if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) - return efx_filter_remove_filter(efx, &filter); + rc = efx_filter_insert_filter(efx, &spec, true); + if (rc < 0) + return rc; - rc = efx_filter_insert_filter(efx, &filter, true); - return rc < 0 ? 
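With the driver moved from the old ntuple hook to set_rxnfc, rules are inserted from userspace with ETHTOOL_SRXCLSRLINS: a location of RX_CLS_LOC_ANY asks the driver to pick a slot, and a ring_cookie of RX_CLS_FLOW_DISC means drop. A userspace sketch of such an insert, assuming the kernel writes the chosen location back as the special-location support added here intends; the interface name "eth0", the destination address, and the target queue are made-up values for illustration.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	/* Match all of the destination, as this driver requires ... */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = htonl(0xc0a80001);	/* 192.168.0.1 */
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = 0xffffffff;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;
	/* ... and leave the source unmasked (wildcard). */
	nfc.fs.ring_cookie = 1;			/* deliver to RX queue 1 */
	nfc.fs.location = RX_CLS_LOC_ANY;	/* let the driver pick a slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rule inserted at location %u\n", nfc.fs.location);
	else
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}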
rc : 0; + rule->location = rc; + return 0; } -static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, - struct ethtool_rxfh_indir *indir) +static int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info) { struct efx_nic *efx = netdev_priv(net_dev); - size_t copy_size = - min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); - if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + if (efx_filter_get_rx_id_limit(efx) == 0) return -EOPNOTSUPP; - indir->size = ARRAY_SIZE(efx->rx_indir_table); - memcpy(indir->ring_index, efx->rx_indir_table, - copy_size * sizeof(indir->ring_index[0])); - return 0; + switch (info->cmd) { + case ETHTOOL_SRXCLSRLINS: + return efx_ethtool_set_class_rule(efx, &info->fs); + + case ETHTOOL_SRXCLSRLDEL: + return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, + info->fs.location); + + default: + return -EOPNOTSUPP; + } } -static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, - const struct ethtool_rxfh_indir *indir) +static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - size_t i; - if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) - return -EOPNOTSUPP; + return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ? + 0 : ARRAY_SIZE(efx->rx_indir_table)); +} - /* Validate size and indices */ - if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) - return -EINVAL; - for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) - if (indir->ring_index[i] >= efx->n_rx_channels) - return -EINVAL; +static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + return 0; +} + +static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, + const u32 *indir) +{ + struct efx_nic *efx = netdev_priv(net_dev); - memcpy(efx->rx_indir_table, indir->ring_index, - sizeof(efx->rx_indir_table)); + memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); efx_nic_push_rx_indir_table(efx); return 0; } @@ -1019,7 +1117,8 @@ const struct ethtool_ops efx_ethtool_ops = { .set_wol = efx_ethtool_set_wol, .reset = efx_ethtool_reset, .get_rxnfc = efx_ethtool_get_rxnfc, - .set_rx_ntuple = efx_ethtool_set_rx_ntuple, + .set_rxnfc = efx_ethtool_set_rxnfc, + .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, }; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 97b606b92e8..8ae1ebd3539 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx) if (!nic_data->stats_pending) return; - nic_data->stats_pending = 0; + nic_data->stats_pending = false; if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { rmb(); /* read the done flag before the stats */ efx->mac_op->update_stats(efx); diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 2b9636f96e0..1fbbbee7b1a 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -155,6 +155,16 @@ static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, spec->data[2] = ntohl(host2); } +static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec, + __be32 *host1, __be16 *port1, + __be32 *host2, __be16 *port2) +{ + *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); + *port1 = htons(spec->data[0]); + *host2 = 
htonl(spec->data[2]); + *port2 = htons(spec->data[1] >> 16); +} + /** * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port * @spec: Specification to initialise @@ -205,6 +215,26 @@ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, return 0; } +int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, + u8 *proto, __be32 *host, __be16 *port) +{ + __be32 host1; + __be16 port1; + + switch (spec->type) { + case EFX_FILTER_TCP_WILD: + *proto = IPPROTO_TCP; + __efx_filter_get_ipv4(spec, &host1, &port1, host, port); + return 0; + case EFX_FILTER_UDP_WILD: + *proto = IPPROTO_UDP; + __efx_filter_get_ipv4(spec, &host1, port, host, &port1); + return 0; + default: + return -EINVAL; + } +} + /** * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports * @spec: Specification to initialise @@ -242,6 +272,25 @@ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, return 0; } +int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, + u8 *proto, __be32 *host, __be16 *port, + __be32 *rhost, __be16 *rport) +{ + switch (spec->type) { + case EFX_FILTER_TCP_FULL: + *proto = IPPROTO_TCP; + break; + case EFX_FILTER_UDP_FULL: + *proto = IPPROTO_UDP; + break; + default: + return -EINVAL; + } + + __efx_filter_get_ipv4(spec, rhost, rport, host, port); + return 0; +} + /** * efx_filter_set_eth_local - specify local Ethernet address and optional VID * @spec: Specification to initialise @@ -270,6 +319,29 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec, return 0; } +int efx_filter_get_eth_local(const struct efx_filter_spec *spec, + u16 *vid, u8 *addr) +{ + switch (spec->type) { + case EFX_FILTER_MAC_WILD: + *vid = EFX_FILTER_VID_UNSPEC; + break; + case EFX_FILTER_MAC_FULL: + *vid = spec->data[0]; + break; + default: + return -EINVAL; + } + + addr[0] = spec->data[2] >> 8; + addr[1] = spec->data[2]; + addr[2] = spec->data[1] >> 24; + addr[3] = spec->data[1] >> 16; + addr[4] = spec->data[1] >> 8; + addr[5] = spec->data[1]; + return 0; +} + /* Build a filter entry and return its n-tuple key. */ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) { @@ -332,7 +404,7 @@ static bool efx_filter_equal(const struct efx_filter_spec *left, static int efx_filter_search(struct efx_filter_table *table, struct efx_filter_spec *spec, u32 key, - bool for_insert, int *depth_required) + bool for_insert, unsigned int *depth_required) { unsigned hash, incr, filter_idx, depth, depth_max; @@ -366,12 +438,59 @@ static int efx_filter_search(struct efx_filter_table *table, } } -/* Construct/deconstruct external filter IDs */ +/* + * Construct/deconstruct external filter IDs. These must be ordered + * by matching priority, for RX NFC semantics. + * + * Each RX MAC filter entry has a flag for whether it can override an + * RX IP filter that also matches. So we assign locations for MAC + * filters with overriding behaviour, then for IP filters, then for + * MAC filters without overriding behaviour. + */ + +#define EFX_FILTER_INDEX_WIDTH 13 +#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) + +static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id, + unsigned int index, u8 flags) +{ + return (table_id == EFX_FILTER_TABLE_RX_MAC && + flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ? + index : + (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index; +} + +static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) +{ + return (id <= EFX_FILTER_INDEX_MASK) ? 
+ EFX_FILTER_TABLE_RX_MAC : + (id >> EFX_FILTER_INDEX_WIDTH) - 1; +} + +static inline unsigned int efx_filter_id_index(u32 id) +{ + return id & EFX_FILTER_INDEX_MASK; +} -static inline int -efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index) +static inline u8 efx_filter_id_flags(u32 id) { - return table_id << 16 | index; + return (id <= EFX_FILTER_INDEX_MASK) ? + EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP : + EFX_FILTER_FLAG_RX; +} + +u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_filter_state *state = efx->filter_state; + + if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0) + return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH) + + state->table[EFX_FILTER_TABLE_RX_MAC].size; + else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0) + return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH) + + state->table[EFX_FILTER_TABLE_RX_IP].size; + else + return 0; } /** @@ -384,14 +503,14 @@ efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index) * On success, return the filter ID. * On failure, return a negative error code. */ -int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, +s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = efx_filter_spec_table(state, spec); struct efx_filter_spec *saved_spec; efx_oword_t filter; - int filter_idx, depth; + unsigned int filter_idx, depth; u32 key; int rc; @@ -439,7 +558,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, netif_vdbg(efx, hw, efx->net_dev, "%s: filter type %d index %d rxq %u set", __func__, spec->type, filter_idx, spec->dmaq_id); - rc = efx_filter_make_id(table->id, filter_idx); + rc = efx_filter_make_id(table->id, filter_idx, spec->flags); out: spin_unlock_bh(&state->lock); @@ -448,7 +567,7 @@ out: static void efx_filter_table_clear_entry(struct efx_nic *efx, struct efx_filter_table *table, - int filter_idx) + unsigned int filter_idx) { static efx_oword_t filter; @@ -463,48 +582,101 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx, } /** - * efx_filter_remove_filter - remove a filter by specification + * efx_filter_remove_id_safe - remove a filter by ID, carefully * @efx: NIC from which to remove the filter - * @spec: Specification for the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter * - * On success, return zero. - * On failure, return a negative error code. + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. 
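The packing above gives overriding MAC filters the lowest IDs (0..8191), then each table a 13-bit-wide block ordered by match priority, so RX NFC rule IDs sort the same way the hardware resolves matches. A standalone sketch of the arithmetic, which round-trips table and index; the table numbering (IP = 0, MAC = 1) follows the two RX tables defined in filter.c, and the simplified names are illustrative stand-ins for the efx_filter_* helpers.

#include <stdio.h>

#define INDEX_WIDTH 13
#define INDEX_MASK  ((1u << INDEX_WIDTH) - 1)

enum { TABLE_RX_IP = 0, TABLE_RX_MAC = 1 };

static unsigned int make_id(unsigned int table, unsigned int index, int override_ip)
{
	/* Overriding MAC filters come first; other tables are offset by one
	 * block so their IDs sort after them. */
	if (table == TABLE_RX_MAC && override_ip)
		return index;
	return (table + 1) << INDEX_WIDTH | index;
}

static unsigned int id_table(unsigned int id)
{
	return id <= INDEX_MASK ? TABLE_RX_MAC : (id >> INDEX_WIDTH) - 1;
}

static unsigned int id_index(unsigned int id)
{
	return id & INDEX_MASK;
}

int main(void)
{
	unsigned int id;

	id = make_id(TABLE_RX_IP, 42, 0);
	printf("IP[42]          -> id %u -> table %u index %u\n",
	       id, id_table(id), id_index(id));

	id = make_id(TABLE_RX_MAC, 7, 1);	/* MAC filter overriding IP */
	printf("MAC[7] override -> id %u -> table %u index %u\n",
	       id, id_table(id), id_index(id));

	id = make_id(TABLE_RX_MAC, 7, 0);
	printf("MAC[7]          -> id %u -> table %u index %u\n",
	       id, id_table(id), id_index(id));
	return 0;
}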
*/ -int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) +int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table = efx_filter_spec_table(state, spec); - struct efx_filter_spec *saved_spec; - efx_oword_t filter; - int filter_idx, depth; - u32 key; + enum efx_filter_table_id table_id; + struct efx_filter_table *table; + unsigned int filter_idx; + struct efx_filter_spec *spec; + u8 filter_flags; int rc; - if (!table) - return -EINVAL; + table_id = efx_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; - key = efx_filter_build(&filter, spec); + filter_idx = efx_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + spec = &table->spec[filter_idx]; - spin_lock_bh(&state->lock); + filter_flags = efx_filter_id_flags(filter_id); - rc = efx_filter_search(table, spec, key, false, &depth); - if (rc < 0) - goto out; - filter_idx = rc; - saved_spec = &table->spec[filter_idx]; + spin_lock_bh(&state->lock); - if (spec->priority < saved_spec->priority) { - rc = -EPERM; - goto out; + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority && spec->flags == filter_flags) { + efx_filter_table_clear_entry(efx, table, filter_idx); + if (table->used == 0) + efx_filter_table_reset_search_depth(table); + rc = 0; + } else { + rc = -ENOENT; } - efx_filter_table_clear_entry(efx, table, filter_idx); - if (table->used == 0) - efx_filter_table_reset_search_depth(table); - rc = 0; + spin_unlock_bh(&state->lock); + + return rc; +} + +/** + * efx_filter_get_filter_safe - retrieve a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Buffer in which to store filter specification + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. 
+ */ +int efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec_buf) +{ + struct efx_filter_state *state = efx->filter_state; + enum efx_filter_table_id table_id; + struct efx_filter_table *table; + struct efx_filter_spec *spec; + unsigned int filter_idx; + u8 filter_flags; + int rc; + + table_id = efx_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + spec = &table->spec[filter_idx]; + + filter_flags = efx_filter_id_flags(filter_id); + + spin_lock_bh(&state->lock); + + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority && spec->flags == filter_flags) { + *spec_buf = *spec; + rc = 0; + } else { + rc = -ENOENT; + } -out: spin_unlock_bh(&state->lock); + return rc; } @@ -514,7 +686,7 @@ static void efx_filter_table_clear(struct efx_nic *efx, { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = &state->table[table_id]; - int filter_idx; + unsigned int filter_idx; spin_lock_bh(&state->lock); @@ -538,6 +710,68 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority); } +u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_filter_state *state = efx->filter_state; + enum efx_filter_table_id table_id; + struct efx_filter_table *table; + unsigned int filter_idx; + u32 count = 0; + + spin_lock_bh(&state->lock); + + for (table_id = EFX_FILTER_TABLE_RX_IP; + table_id <= EFX_FILTER_TABLE_RX_MAC; + table_id++) { + table = &state->table[table_id]; + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (test_bit(filter_idx, table->used_bitmap) && + table->spec[filter_idx].priority == priority) + ++count; + } + } + + spin_unlock_bh(&state->lock); + + return count; +} + +s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + struct efx_filter_state *state = efx->filter_state; + enum efx_filter_table_id table_id; + struct efx_filter_table *table; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&state->lock); + + for (table_id = EFX_FILTER_TABLE_RX_IP; + table_id <= EFX_FILTER_TABLE_RX_MAC; + table_id++) { + table = &state->table[table_id]; + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (test_bit(filter_idx, table->used_bitmap) && + table->spec[filter_idx].priority == priority) { + if (count == size) { + count = -EMSGSIZE; + goto out; + } + buf[count++] = efx_filter_make_id( + table_id, filter_idx, + table->spec[filter_idx].flags); + } + } + } +out: + spin_unlock_bh(&state->lock); + + return count; +} + /* Restore filter stater after reset */ void efx_restore_filters(struct efx_nic *efx) { @@ -545,7 +779,7 @@ void efx_restore_filters(struct efx_nic *efx) enum efx_filter_table_id table_id; struct efx_filter_table *table; efx_oword_t filter; - int filter_idx; + unsigned int filter_idx; spin_lock_bh(&state->lock); diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 872f2132a49..3d4108cd90c 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -78,6 +78,11 @@ enum efx_filter_flags { * * Use the efx_filter_set_*() functions to initialise the @type and * @data fields. 
+ * + * The @priority field is used by software to determine whether a new + * filter may replace an old one. The hardware priority of a filter + * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP + * flag. */ struct efx_filter_spec { u8 type:4; @@ -100,11 +105,18 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec, extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, __be32 host, __be16 port); +extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, + u8 *proto, __be32 *host, __be16 *port); extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, __be32 host, __be16 port, __be32 rhost, __be16 rport); +extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, + u8 *proto, __be32 *host, __be16 *port, + __be32 *rhost, __be16 *rport); extern int efx_filter_set_eth_local(struct efx_filter_spec *spec, u16 vid, const u8 *addr); +extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec, + u16 *vid, u8 *addr); enum { EFX_FILTER_VID_UNSPEC = 0xffff, }; diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index b6304486f24..bc9dcd6b30d 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); if (rc) goto out; - part->mcdi.updating = 1; + part->mcdi.updating = true; } /* The MCDI interface can in fact do multiple erase blocks at once; @@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start, rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); if (rc) goto out; - part->mcdi.updating = 1; + part->mcdi.updating = true; } while (offset < end) { @@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd) int rc = 0; if (part->mcdi.updating) { - part->mcdi.updating = 0; + part->mcdi.updating = false; rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type); } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b8e251a1ee4..c49502bab6a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -908,7 +908,7 @@ struct efx_nic_type { unsigned int phys_addr_channels; unsigned int tx_dc_base; unsigned int rx_dc_base; - u32 offload_features; + netdev_features_t offload_features; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 752d521c09b..aca34986176 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -479,11 +479,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel, if (efx->net_dev->features & NETIF_F_RXHASH) skb->rxhash = efx_rx_buf_hash(eh); - skb_frag_set_page(skb, 0, page); - skb_shinfo(skb)->frags[0].page_offset = - efx_rx_buf_offset(efx, rx_buf); - skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len); - skb_shinfo(skb)->nr_frags = 1; + skb_fill_page_desc(skb, 0, page, + efx_rx_buf_offset(efx, rx_buf), rx_buf->len); skb->len = rx_buf->len; skb->data_len = rx_buf->len; @@ -669,7 +666,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->ptr_mask); /* Allocate RX buffers */ - rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer), + rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), GFP_KERNEL); if (!rx_queue->buffer) return -ENOMEM; diff --git 
a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 822f6c2a6a7..52edd24fcde 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, /* Determine how many packets to send */ state->packet_count = efx->txq_entries / 3; state->packet_count = min(1 << (i << 2), state->packet_count); - state->skbs = kzalloc(sizeof(state->skbs[0]) * - state->packet_count, GFP_KERNEL); + state->skbs = kcalloc(state->packet_count, + sizeof(state->skbs[0]), GFP_KERNEL); if (!state->skbs) return -ENOMEM; state->flush = false; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index cc2549cb707..4d5d619feaa 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx) static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; - bool already_attached = 0; + bool already_attached = false; efx_oword_t reg; int rc; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index df88c5430f9..72f0fbc73b1 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -31,7 +31,9 @@ #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer) + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl) { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; @@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } if (buffer->skb) { + (*pkts_compl)++; + (*bytes_compl) += buffer->skb->len; dev_kfree_skb_any((struct sk_buff *) buffer->skb); buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, @@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) buffer->skb = skb; buffer->continuation = false; + netdev_tx_sent_queue(tx_queue->core_txq, skb->len); + /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); @@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; --tx_queue->insert_count; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; - efx_dequeue_buffer(tx_queue, buffer); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); buffer->len = 0; } @@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) * specified index. 
*/ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, - unsigned int index) + unsigned int index, + unsigned int *pkts_compl, + unsigned int *bytes_compl) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; @@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, return; } - efx_dequeue_buffer(tx_queue, buffer); + efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); buffer->continuation = true; buffer->len = 0; @@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; + unsigned int pkts_compl = 0, bytes_compl = 0; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); - efx_dequeue_buffers(tx_queue, index); + efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); + netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); /* See if we need to restart the netif queue. This barrier * separates the update of read_count from the test of the @@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); /* Allocate software ring */ - tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer), + tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; @@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) /* Free any buffers left in the ring */ while (tx_queue->read_count != tx_queue->write_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; - efx_dequeue_buffer(tx_queue, buffer); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; } + netdev_tx_reset_queue(tx_queue->core_txq); } void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) @@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, goto mem_err; } + netdev_tx_sent_queue(tx_queue->core_txq, skb->len); + /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 60135aa5580..53efe7c7b1c 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -28,6 +28,7 @@ #include <linux/tcp.h> /* struct tcphdr */ #include <linux/skbuff.h> #include <linux/mii.h> /* MII definitions */ +#include <linux/crc32.h> #include <asm/ip32/mace.h> #include <asm/ip32/ip32_ints.h> @@ -58,12 +59,19 @@ static int timeout = TX_TIMEOUT; module_param(timeout, int, 0); /* + * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC. + */ +#define METH_MCF_LIMIT 32 + +/* * This structure is private to each device. It is used to pass * packets in and out, so there is place for a packet */ struct meth_private { /* in-memory copy of MAC Control register */ - unsigned long mac_ctrl; + u64 mac_ctrl; + /* in-memory copy of DMA Control register */ unsigned long dma_ctrl; /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ @@ -79,6 +87,9 @@ struct meth_private { struct sk_buff *rx_skbs[RX_RING_ENTRIES]; unsigned long rx_write; + /* Multicast filter. 
*/ + u64 mcast_filter; + spinlock_t meth_lock; }; @@ -765,6 +776,40 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } } +static void meth_set_rx_mode(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + netif_stop_queue(dev); + spin_lock_irqsave(&priv->meth_lock, flags); + priv->mac_ctrl &= ~METH_PROMISC; + + if (dev->flags & IFF_PROMISC) { + priv->mac_ctrl |= METH_PROMISC; + priv->mcast_filter = 0xffffffffffffffffUL; + } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) || + (dev->flags & IFF_ALLMULTI)) { + priv->mac_ctrl |= METH_ACCEPT_AMCAST; + priv->mcast_filter = 0xffffffffffffffffUL; + } else { + struct netdev_hw_addr *ha; + priv->mac_ctrl |= METH_ACCEPT_MCAST; + + netdev_for_each_mc_addr(ha, dev) + set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26), + (volatile unsigned long *)&priv->mcast_filter); + } + + /* Write the changes to the chip registers. */ + mace->eth.mac_ctrl = priv->mac_ctrl; + mace->eth.mcast_filter = priv->mcast_filter; + + /* Done! */ + spin_unlock_irqrestore(&priv->meth_lock, flags); + netif_wake_queue(dev); +} + static const struct net_device_ops meth_netdev_ops = { .ndo_open = meth_open, .ndo_stop = meth_release, @@ -774,6 +819,7 @@ static const struct net_device_ops meth_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = meth_set_rx_mode, }; /* @@ -830,24 +876,7 @@ static struct platform_driver meth_driver = { } }; -static int __init meth_init_module(void) -{ - int err; - - err = platform_driver_register(&meth_driver); - if (err) - printk(KERN_ERR "Driver registration failed\n"); - - return err; -} - -static void __exit meth_exit_module(void) -{ - platform_driver_unregister(&meth_driver); -} - -module_init(meth_init_module); -module_exit(meth_exit_module); +module_platform_driver(meth_driver); MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>"); MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 1b4658c9939..5b118cd5bf9 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -47,8 +47,6 @@ #define sis190_rx_skb netif_rx #define sis190_rx_quota(count, quota) count -#define MAC_ADDR_LEN 6 - #define NUM_TX_DESC 64 /* [8..1024] */ #define NUM_RX_DESC 64 /* [8..8192] */ #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) @@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, } /* Get MAC address from EEPROM */ - for (i = 0; i < MAC_ADDR_LEN / 2; i++) { + for (i = 0; i < ETH_ALEN / 2; i++) { u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); @@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, udelay(50); pci_read_config_byte(isa_bridge, 0x48, &reg); - for (i = 0; i < MAC_ADDR_LEN; i++) { + for (i = 0; i < ETH_ALEN; i++) { outb(0x9 + i, 0x78); dev->dev_addr[i] = inb(0x79); } @@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev) */ SIS_W16(RxMacControl, ctl & ~0x0f00); - for (i = 0; i < MAC_ADDR_LEN; i++) + for (i = 0; i < ETH_ALEN; i++) SIS_W8(RxMacAddr + i, dev->dev_addr[i]); SIS_W16(RxMacControl, ctl); @@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev, { struct sis190_private *tp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); -
strcpy(info->bus_info, pci_name(tp->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), + sizeof(info->bus_info)); } static int sis190_get_regs_len(struct net_device *dev) diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index a184abc5ef1..c8efc708c79 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); - strcpy (info->driver, SIS900_MODULE_NAME); - strcpy (info->version, SIS900_DRV_VERSION); - strcpy (info->bus_info, pci_name(sis_priv->pci_dev)); + strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sis_priv->pci_dev), + sizeof(info->bus_info)); } static u32 sis900_get_msglevel(struct net_device *net_dev) diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 0a5dfb81415..2c077ce0b6d 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * { struct epic_private *np = netdev_priv(dev); - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - strcpy (info->bus_info, pci_name(np->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 8f61fe9db1d..313ba3b32ab 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = { }, }; -static int __init smc911x_init(void) -{ - return platform_driver_register(&smc911x_driver); -} - -static void __exit smc911x_cleanup(void) -{ - platform_driver_unregister(&smc911x_driver); -} - -module_init(smc911x_init); -module_exit(smc911x_cleanup); +module_platform_driver(smc911x_driver); diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index cbfa9818713..ada927aba7a 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev) static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index f47f81e2532..64ad3ed7449 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = { }, }; -static int __init smc_init(void) -{ - return platform_driver_register(&smc_driver); -} - -static void __exit smc_cleanup(void) -{ - platform_driver_unregister(&smc_driver); -} - -module_init(smc_init); 
-module_exit(smc_cleanup); +module_platform_driver(smc_driver); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 8843071fe98..9d0b8ced023 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -44,6 +44,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/bug.h> @@ -88,6 +89,8 @@ struct smsc911x_ops { unsigned int *buf, unsigned int wordcount); }; +#define SMSC911X_NUM_SUPPLIES 2 + struct smsc911x_data { void __iomem *ioaddr; @@ -138,6 +141,9 @@ struct smsc911x_data { /* register access functions */ const struct smsc911x_ops *ops; + + /* regulators */ + struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES]; }; /* Easy access to information */ @@ -362,6 +368,76 @@ out: spin_unlock_irqrestore(&pdata->dev_lock, flags); } +/* + * enable resources, currently just regulators. + */ +static int smsc911x_enable_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + int ret = 0; + + ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies), + pdata->supplies); + if (ret) + netdev_err(ndev, "failed to enable regulators %d\n", + ret); + return ret; +} + +/* + * disable resources, currently just regulators. + */ +static int smsc911x_disable_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + int ret = 0; + + ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies), + pdata->supplies); + return ret; +} + +/* + * Request resources, currently just regulators. + * + * The SMSC911x has two power pins: vddvario and vdd33a, in designs where + * these are not always-on we need to request regulators to be turned on + * before we can try to access the device registers. + */ +static int smsc911x_request_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + int ret = 0; + + /* Request regulators */ + pdata->supplies[0].supply = "vdd33a"; + pdata->supplies[1].supply = "vddvario"; + ret = regulator_bulk_get(&pdev->dev, + ARRAY_SIZE(pdata->supplies), + pdata->supplies); + if (ret) + netdev_err(ndev, "couldn't get regulators %d\n", + ret); + return ret; +} + +/* + * Free resources, currently just regulators. + * + */ +static void smsc911x_free_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + + /* Free regulators */ + regulator_bulk_free(ARRAY_SIZE(pdata->supplies), + pdata->supplies); +} + /* waits for MAC not busy, with timeout. 
Only called by smsc911x_mac_read * and smsc911x_mac_write, so assumes mac_lock is held */ static int smsc911x_mac_complete(struct smsc911x_data *pdata) @@ -1243,10 +1319,92 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) spin_unlock(&pdata->mac_lock); } +static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* + * If energy is detected the PHY is already awake, so it is not necessary + * to disable the energy detect power-down mode. + */ + if ((rc & MII_LAN83C185_EDPWRDOWN) && + !(rc & MII_LAN83C185_ENERGYON)) { + /* Disable energy detect mode for these SMSC transceivers */ + rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc & (~MII_LAN83C185_EDPWRDOWN)); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + mdelay(1); + } + + return 0; +} + +static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* Only enable if energy detect mode is already disabled */ + if (!(rc & MII_LAN83C185_EDPWRDOWN)) { + mdelay(100); + /* Enable energy detect mode for these SMSC transceivers */ + rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc | MII_LAN83C185_EDPWRDOWN); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + mdelay(1); + } + return 0; +} + static int smsc911x_soft_reset(struct smsc911x_data *pdata) { unsigned int timeout; unsigned int temp; + int ret; + + /* + * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that + * is initialized in an Energy Detect Power-Down mode that prevents + * the MAC chip from being software reset. So we have to wake up the PHY + * beforehand.
+ */ + if (pdata->generation == 4) { + ret = smsc911x_phy_disable_energy_detect(pdata); + + if (ret) { + SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip"); + return ret; + } + } /* Reset the LAN911x */ smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); @@ -1260,6 +1418,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) SMSC_WARN(pdata, drv, "Failed to complete reset"); return -EIO; } + + if (pdata->generation == 4) { + ret = smsc911x_phy_enable_energy_detect(pdata); + + if (ret) { + SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip"); + return ret; + } + } + return 0; } @@ -2092,6 +2260,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev) iounmap(pdata->ioaddr); + (void)smsc911x_disable_resources(pdev); + smsc911x_free_resources(pdev); + free_netdev(dev); return 0; @@ -2218,10 +2389,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) pdata->dev = dev; pdata->msg_enable = ((1 << debug) - 1); + platform_set_drvdata(pdev, dev); + + retval = smsc911x_request_resources(pdev); + if (retval) + goto out_return_resources; + + retval = smsc911x_enable_resources(pdev); + if (retval) + goto out_disable_resources; + if (pdata->ioaddr == NULL) { SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); retval = -ENOMEM; - goto out_free_netdev_2; + goto out_disable_resources; } retval = smsc911x_probe_config_dt(&pdata->config, np); @@ -2233,7 +2414,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) if (retval) { SMSC_WARN(pdata, probe, "Error smsc911x config not found"); - goto out_unmap_io_3; + goto out_disable_resources; } /* assume standard, non-shifted, access to HW registers */ @@ -2244,7 +2425,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) retval = smsc911x_init(dev); if (retval < 0) - goto out_unmap_io_3; + goto out_disable_resources; /* configure irq polarity and type before connecting isr */ if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) @@ -2264,15 +2445,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) if (retval) { SMSC_WARN(pdata, probe, "Unable to claim requested irq: %d", dev->irq); - goto out_unmap_io_3; + goto out_free_irq; } - platform_set_drvdata(pdev, dev); - retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_unset_drvdata_4; + goto out_free_irq; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); @@ -2321,12 +2500,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) out_unregister_netdev_5: unregister_netdev(dev); -out_unset_drvdata_4: - platform_set_drvdata(pdev, NULL); +out_free_irq: free_irq(dev->irq, dev); -out_unmap_io_3: +out_disable_resources: + (void)smsc911x_disable_resources(pdev); +out_return_resources: + smsc911x_free_resources(pdev); + platform_set_drvdata(pdev, NULL); iounmap(pdata->ioaddr); -out_free_netdev_2: free_netdev(dev); out_release_io_1: release_mem_region(res->start, resource_size(res)); diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h index 8d67aacf886..938ecf29081 100644 --- a/drivers/net/ethernet/smsc/smsc911x.h +++ b/drivers/net/ethernet/smsc/smsc911x.h @@ -401,4 +401,8 @@ #include <asm/smsc911x.h> #endif +#ifdef CONFIG_SMSC_PHY +#include <linux/smscphy.h> +#endif + #endif /* __SMSC911X_H__ */ diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index edb24b0e337..a9efbdfe530 100644 --- 
a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, { struct smsc9420_pdata *pd = netdev_priv(netdev); - strcpy(drvinfo->driver, DRV_NAME); - strcpy(drvinfo->bus_info, pci_name(pd->pdev)); - strcpy(drvinfo->version, DRV_VERSION); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->bus_info, pci_name(pd->pdev), + sizeof(drvinfo->bus_info)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); } static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 22745d7bf53..036428348fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -12,11 +12,36 @@ config STMMAC_ETH if STMMAC_ETH +config STMMAC_PLATFORM + tristate "STMMAC platform bus support" + depends on STMMAC_ETH + default y + ---help--- + This selects the platform specific bus support for + the stmmac device driver. This is the driver used + on many embedded STM platforms based on ARM and SuperH + processors. + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config STMMAC_PCI + tristate "STMMAC support on PCI bus (EXPERIMENTAL)" + depends on STMMAC_ETH && PCI && EXPERIMENTAL + ---help--- + This is to select the Synopsys DWMAC available on PCI devices, + if you have a controller with this interface, say Y or M here. + + This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 + D1215994A VIRTEX FPGA board. + + If unsure, say N. + config STMMAC_DEBUG_FS bool "Enable monitoring via sysFS " default n depends on STMMAC_ETH && DEBUG_FS - -- help + ---help--- The stmmac entry in /sys reports DMA TX/RX rings or (if supported) the HW cap register. 
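The Kconfig hunk above splits the stmmac driver into a common core plus optional bus glue selected by CONFIG_STMMAC_PLATFORM and CONFIG_STMMAC_PCI; the Makefile change and the new stmmac_platform.c and stmmac_pci.c files further down wire the glue into the build. As a rough sketch of the resulting probe flow (condensed from the platform glue added later in this patch; error handling and cleanup are omitted, and the function name is made up purely for illustration), a bus glue driver claims its bus-specific resources and hands everything else to the common core:

#include <linux/platform_device.h>
#include <linux/io.h>
#include "stmmac.h"

static int example_glue_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *addr;
	struct stmmac_priv *priv;

	/* The glue layer claims and maps the MAC registers for its bus... */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = ioremap(res->start, resource_size(res));

	/* ...the common core allocates and registers the net_device... */
	priv = stmmac_dvr_probe(&pdev->dev, pdev->dev.platform_data);

	/* ...and the glue fills in the bus-specific details afterwards. */
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;
	priv->dev->irq = platform_get_irq_byname(pdev, "macirq");

	platform_set_drvdata(pdev, priv->dev);
	return 0;
}

The PCI glue below follows the same pattern with pci_iomap() and pdev->irq, and the suspend/resume hooks on both buses funnel into the shared stmmac_suspend()/stmmac_resume() helpers.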
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index d7c45164ea7..bc965ac9e02 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o +stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o +stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 2cc11929582..d0b814ef067 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -22,7 +22,11 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#include <linux/etherdevice.h> #include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/module.h> +#include <linux/init.h> #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define STMMAC_VLAN_TAG_USED #include <linux/if_vlan.h> @@ -315,5 +319,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); + +extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); + extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index e25093510b0..f20aa12931d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], writel(data, ioaddr + low); } +/* Enable disable MAC RX/TX */ +void stmmac_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + + if (enable) + value |= MAC_RNABLE_RX | MAC_ENABLE_TX; + else + value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); + + writel(value, ioaddr + MAC_CTRL_REG); +} + void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index a140a8fbf05..120740020e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,7 +20,8 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ -#define DRV_MODULE_VERSION "Oct_2011" +#define STMMAC_RESOURCE_NAME "stmmaceth" +#define DRV_MODULE_VERSION "Dec_2011" #include <linux/stmmac.h> #include <linux/phy.h> #include "common.h" @@ -82,8 +83,18 @@ struct stmmac_priv { int hw_cap_support; }; +extern int phyaddr; + extern int stmmac_mdio_unregister(struct net_device *ndev); extern int stmmac_mdio_register(struct net_device *ndev); extern void stmmac_set_ethtool_ops(struct net_device *netdev); extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; + +int stmmac_freeze(struct net_device *ndev); +int 
stmmac_restore(struct net_device *ndev); +int stmmac_resume(struct net_device *ndev); +int stmmac_suspend(struct net_device *ndev); +int stmmac_dvr_remove(struct net_device *ndev); +struct stmmac_priv *stmmac_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 0395f9eba80..9573303a706 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -185,9 +185,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); if (priv->plat->has_gmac) - strcpy(info->driver, GMAC_ETHTOOL_NAME); + strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); else - strcpy(info->driver, MAC100_ETHTOOL_NAME); + strlcpy(info->driver, MAC100_ETHTOOL_NAME, + sizeof(info->driver)); strcpy(info->version, DRV_MODULE_VERSION); info->fw_version[0] = '\0'; @@ -458,7 +459,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return 0; } -static struct ethtool_ops stmmac_ethtool_ops = { +static const struct ethtool_ops stmmac_ethtool_ops = { .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, .get_settings = stmmac_ethtool_getsettings, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 72cd190b9c1..3738b470054 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,12 +28,8 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ -#include <linux/module.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/platform_device.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/skbuff.h> @@ -52,8 +48,6 @@ #endif #include "stmmac.h" -#define STMMAC_RESOURCE_NAME "stmmaceth" - #undef STMMAC_DEBUG /*#define STMMAC_DEBUG*/ #ifdef STMMAC_DEBUG @@ -93,7 +87,7 @@ static int debug = -1; /* -1: default, 0: no output, 16: all */ module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); -static int phyaddr = -1; +int phyaddr = -1; module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); @@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +#ifdef CONFIG_STMMAC_DEBUG_FS +static int stmmac_init_fs(struct net_device *dev); +static void stmmac_exit_fs(void); +#endif + /** * stmmac_verify_args - verify the driver parameters. * Description: it verifies if some wrong parameter is passed to the driver. @@ -345,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev) return 0; } -static inline void stmmac_enable_mac(void __iomem *ioaddr) -{ - u32 value = readl(ioaddr + MAC_CTRL_REG); - - value |= MAC_RNABLE_RX | MAC_ENABLE_TX; - writel(value, ioaddr + MAC_CTRL_REG); -} - -static inline void stmmac_disable_mac(void __iomem *ioaddr) -{ - u32 value = readl(ioaddr + MAC_CTRL_REG); - - value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); - writel(value, ioaddr + MAC_CTRL_REG); -} - /** * display_ring * @p: pointer to the ring. 
@@ -887,6 +870,53 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) } /** + * stmmac_mac_device_setup + * @dev : device pointer + * Description: this is to attach the GMAC or MAC 10/100 + * main core structures that will be completed during the + * open step. + */ +static int stmmac_mac_device_setup(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + struct mac_device_info *device; + + if (priv->plat->has_gmac) + device = dwmac1000_setup(priv->ioaddr); + else + device = dwmac100_setup(priv->ioaddr); + + if (!device) + return -ENOMEM; + + priv->hw = device; + priv->hw->ring = &ring_mode_ops; + + if (device_can_wakeup(priv->device)) { + priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + enable_irq_wake(priv->wol_irq); + } + + return 0; +} + +static void stmmac_check_ether_addr(struct stmmac_priv *priv) +{ + /* verify if the MAC address is valid, in case of failures it + * generates a random MAC address */ + if (!is_valid_ether_addr(priv->dev->dev_addr)) { + priv->hw->mac->get_umac_addr((void __iomem *) + priv->dev->base_addr, + priv->dev->dev_addr, 0); + if (!is_valid_ether_addr(priv->dev->dev_addr)) + random_ether_addr(priv->dev->dev_addr); + } + pr_warning("%s: device MAC address %pM\n", priv->dev->name, + priv->dev->dev_addr); +} + +/** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. * Description: @@ -900,18 +930,28 @@ static int stmmac_open(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int ret; - /* Check that the MAC address is valid. If its not, refuse - * to bring the device up. The user must specify an - * address using the following linux command: - * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ - if (!is_valid_ether_addr(dev->dev_addr)) { - random_ether_addr(dev->dev_addr); - pr_warning("%s: generated random MAC address %pM\n", dev->name, - dev->dev_addr); - } + /* MAC HW device setup */ + ret = stmmac_mac_device_setup(dev); + if (ret < 0) + return ret; + + stmmac_check_ether_addr(priv); stmmac_verify_args(); + /* Override with kernel parameters if supplied XXX CRS XXX + * this needs to have multiple instances */ + if ((phyaddr >= 0) && (phyaddr <= 31)) + priv->plat->phy_addr = phyaddr; + + /* MDIO bus Registration */ + ret = stmmac_mdio_register(dev); + if (ret < 0) { + pr_debug("%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); + return ret; + } + #ifdef CONFIG_STMMAC_TIMER priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); if (unlikely(priv->tm == NULL)) { @@ -1008,7 +1048,7 @@ static int stmmac_open(struct net_device *dev) } /* Enable the MAC Rx/Tx */ - stmmac_enable_mac(priv->ioaddr); + stmmac_set_mac(priv->ioaddr, true); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -1019,6 +1059,11 @@ static int stmmac_open(struct net_device *dev) stmmac_mmc_setup(priv); +#ifdef CONFIG_STMMAC_DEBUG_FS + ret = stmmac_init_fs(dev); + if (ret < 0) + pr_warning("\tFailed debugFS registration"); +#endif /* Start the ball rolling... 
*/ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); priv->hw->dma->start_tx(priv->ioaddr); @@ -1091,10 +1136,15 @@ static int stmmac_release(struct net_device *dev) free_dma_desc_resources(priv); /* Disable the MAC Rx/Tx */ - stmmac_disable_mac(priv->ioaddr); + stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(dev); +#ifdef CONFIG_STMMAC_DEBUG_FS + stmmac_exit_fs(); +#endif + stmmac_mdio_unregister(dev); + return 0; } @@ -1470,7 +1520,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static u32 stmmac_fix_features(struct net_device *dev, u32 features) +static netdev_features_t stmmac_fix_features(struct net_device *dev, + netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(dev); @@ -1738,28 +1789,41 @@ static const struct net_device_ops stmmac_netdev_ops = { }; /** - * stmmac_probe - Initialization of the adapter . - * @dev : device pointer - * Description: The function initializes the network device structure for - * the STMMAC driver. It also calls the low level routines - * in order to init the HW (i.e. the DMA engine) + * stmmac_dvr_probe + * @device: device pointer + * Description: this is the main probe function used to + * call the alloc_etherdev, allocate the priv structure. */ -static int stmmac_probe(struct net_device *dev) +struct stmmac_priv *stmmac_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat) { int ret = 0; - struct stmmac_priv *priv = netdev_priv(dev); + struct net_device *ndev = NULL; + struct stmmac_priv *priv; + + ndev = alloc_etherdev(sizeof(struct stmmac_priv)); + if (!ndev) { + pr_err("%s: ERROR: allocating the device\n", __func__); + return NULL; + } + + SET_NETDEV_DEV(ndev, device); - ether_setup(dev); + priv = netdev_priv(ndev); + priv->device = device; + priv->dev = ndev; - dev->netdev_ops = &stmmac_netdev_ops; - stmmac_set_ethtool_ops(dev); + ether_setup(ndev); - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - dev->features |= dev->hw_features | NETIF_F_HIGHDMA; - dev->watchdog_timeo = msecs_to_jiffies(watchdog); + ndev->netdev_ops = &stmmac_netdev_ops; + stmmac_set_ethtool_ops(ndev); + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ - dev->features |= NETIF_F_HW_VLAN_RX; + ndev->features |= NETIF_F_HW_VLAN_RX; #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); @@ -1767,248 +1831,60 @@ static int stmmac_probe(struct net_device *dev) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ priv->pause = pause; - netif_napi_add(dev, &priv->napi, stmmac_poll, 64); - - /* Get the MAC address */ - priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr, - dev->dev_addr, 0); - - if (!is_valid_ether_addr(dev->dev_addr)) - pr_warning("\tno valid MAC address;" - "please, use ifconfig or nwhwconfig!\n"); + priv->plat = plat_dat; + netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); spin_lock_init(&priv->lock); spin_lock_init(&priv->tx_lock); - ret = register_netdev(dev); + ret = register_netdev(ndev); if (ret) { pr_err("%s: ERROR %i registering the device\n", __func__, ret); - return -ENODEV; + goto error; } DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", - dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", - (dev->features & NETIF_F_IP_CSUM) ? 
"on" : "off"); + ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off", + (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off"); - return ret; -} + return priv; -/** - * stmmac_mac_device_setup - * @dev : device pointer - * Description: select and initialise the mac device (mac100 or Gmac). - */ -static int stmmac_mac_device_setup(struct net_device *dev) -{ - struct stmmac_priv *priv = netdev_priv(dev); - - struct mac_device_info *device; - - if (priv->plat->has_gmac) { - dev->priv_flags |= IFF_UNICAST_FLT; - device = dwmac1000_setup(priv->ioaddr); - } else { - device = dwmac100_setup(priv->ioaddr); - } +error: + netif_napi_del(&priv->napi); - if (!device) - return -ENOMEM; - - priv->hw = device; - priv->hw->ring = &ring_mode_ops; - - if (device_can_wakeup(priv->device)) { - priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - enable_irq_wake(priv->wol_irq); - } - - return 0; -} - -/** - * stmmac_dvr_probe - * @pdev: platform device pointer - * Description: the driver is initialized through platform_device. - */ -static int stmmac_dvr_probe(struct platform_device *pdev) -{ - int ret = 0; - struct resource *res; - void __iomem *addr = NULL; - struct net_device *ndev = NULL; - struct stmmac_priv *priv = NULL; - struct plat_stmmacenet_data *plat_dat; - - pr_info("STMMAC driver:\n\tplatform registration... "); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - pr_info("\tdone!\n"); - - if (!request_mem_region(res->start, resource_size(res), - pdev->name)) { - pr_err("%s: ERROR: memory allocation failed" - "cannot get the I/O addr 0x%x\n", - __func__, (unsigned int)res->start); - return -EBUSY; - } - - addr = ioremap(res->start, resource_size(res)); - if (!addr) { - pr_err("%s: ERROR: memory mapping failed\n", __func__); - ret = -ENOMEM; - goto out_release_region; - } - - ndev = alloc_etherdev(sizeof(struct stmmac_priv)); - if (!ndev) { - pr_err("%s: ERROR: allocating the device\n", __func__); - ret = -ENOMEM; - goto out_unmap; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - /* Get the MAC information */ - ndev->irq = platform_get_irq_byname(pdev, "macirq"); - if (ndev->irq == -ENXIO) { - pr_err("%s: ERROR: MAC IRQ configuration " - "information not found\n", __func__); - ret = -ENXIO; - goto out_free_ndev; - } - - priv = netdev_priv(ndev); - priv->device = &(pdev->dev); - priv->dev = ndev; - plat_dat = pdev->dev.platform_data; - - priv->plat = plat_dat; - - priv->ioaddr = addr; - - /* - * On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (priv->wol_irq == -ENXIO) - priv->wol_irq = ndev->irq; - - platform_set_drvdata(pdev, ndev); - - /* Set the I/O base addr */ - ndev->base_addr = (unsigned long)addr; - - /* Custom initialisation */ - if (priv->plat->init) { - ret = priv->plat->init(pdev); - if (unlikely(ret)) - goto out_free_ndev; - } - - /* MAC HW device detection */ - ret = stmmac_mac_device_setup(ndev); - if (ret < 0) - goto out_plat_exit; - - /* Network Device Registration */ - ret = stmmac_probe(ndev); - if (ret < 0) - goto out_plat_exit; - - /* Override with kernel parameters if supplied XXX CRS XXX - * this needs to have multiple instances */ - if ((phyaddr >= 0) && (phyaddr <= 31)) - priv->plat->phy_addr = phyaddr; - - pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" - "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, - pdev->id, ndev->irq, addr); - - /* MDIO bus Registration */ - pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id); - ret = stmmac_mdio_register(ndev); - if (ret < 0) - goto out_unregister; - pr_debug("registered!\n"); - -#ifdef CONFIG_STMMAC_DEBUG_FS - ret = stmmac_init_fs(ndev); - if (ret < 0) - pr_warning("\tFailed debugFS registration"); -#endif - - return 0; - -out_unregister: unregister_netdev(ndev); -out_plat_exit: - if (priv->plat->exit) - priv->plat->exit(pdev); -out_free_ndev: free_netdev(ndev); - platform_set_drvdata(pdev, NULL); -out_unmap: - iounmap(addr); -out_release_region: - release_mem_region(res->start, resource_size(res)); - return ret; + return NULL; } /** * stmmac_dvr_remove - * @pdev: platform device pointer + * @ndev: net device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX - * changes the link status, releases the DMA descriptor rings, - * unregisters the MDIO bus and unmaps the allocated memory. + * changes the link status, releases the DMA descriptor rings. 
*/ -static int stmmac_dvr_remove(struct platform_device *pdev) +int stmmac_dvr_remove(struct net_device *ndev) { - struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); - struct resource *res; pr_info("%s:\n\tremoving driver", __func__); priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); - stmmac_disable_mac(priv->ioaddr); - + stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); - - stmmac_mdio_unregister(ndev); - - if (priv->plat->exit) - priv->plat->exit(pdev); - - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); - - iounmap((void *)priv->ioaddr); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - -#ifdef CONFIG_STMMAC_DEBUG_FS - stmmac_exit_fs(); -#endif - free_netdev(ndev); return 0; } #ifdef CONFIG_PM -static int stmmac_suspend(struct device *dev) +int stmmac_suspend(struct net_device *ndev) { - struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); int dis_ic = 0; @@ -2042,15 +1918,14 @@ static int stmmac_suspend(struct device *dev) if (device_may_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); else - stmmac_disable_mac(priv->ioaddr); + stmmac_set_mac(priv->ioaddr, false); spin_unlock(&priv->lock); return 0; } -static int stmmac_resume(struct device *dev) +int stmmac_resume(struct net_device *ndev) { - struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); if (!netif_running(ndev)) @@ -2069,7 +1944,7 @@ static int stmmac_resume(struct device *dev) netif_device_attach(ndev); /* Enable the MAC and DMA */ - stmmac_enable_mac(priv->ioaddr); + stmmac_set_mac(priv->ioaddr, true); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); @@ -2089,68 +1964,23 @@ static int stmmac_resume(struct device *dev) return 0; } -static int stmmac_freeze(struct device *dev) +int stmmac_freeze(struct net_device *ndev) { - struct net_device *ndev = dev_get_drvdata(dev); - if (!ndev || !netif_running(ndev)) return 0; return stmmac_release(ndev); } -static int stmmac_restore(struct device *dev) +int stmmac_restore(struct net_device *ndev) { - struct net_device *ndev = dev_get_drvdata(dev); - if (!ndev || !netif_running(ndev)) return 0; return stmmac_open(ndev); } - -static const struct dev_pm_ops stmmac_pm_ops = { - .suspend = stmmac_suspend, - .resume = stmmac_resume, - .freeze = stmmac_freeze, - .thaw = stmmac_restore, - .restore = stmmac_restore, -}; -#else -static const struct dev_pm_ops stmmac_pm_ops; #endif /* CONFIG_PM */ -static struct platform_driver stmmac_driver = { - .probe = stmmac_dvr_probe, - .remove = stmmac_dvr_remove, - .driver = { - .name = STMMAC_RESOURCE_NAME, - .owner = THIS_MODULE, - .pm = &stmmac_pm_ops, - }, -}; - -/** - * stmmac_init_module - Entry point for the driver - * Description: This function is the entry point for the driver. - */ -static int __init stmmac_init_module(void) -{ - int ret; - - ret = platform_driver_register(&stmmac_driver); - return ret; -} - -/** - * stmmac_cleanup_module - Cleanup routine for the driver - * Description: This function is the cleanup routine for the driver. 
- */ -static void __exit stmmac_cleanup_module(void) -{ - platform_driver_unregister(&stmmac_driver); -} - #ifndef MODULE static int __init stmmac_cmdline_opt(char *str) { @@ -2210,9 +2040,6 @@ err: __setup("stmmaceth=", stmmac_cmdline_opt); #endif -module_init(stmmac_init_module); -module_exit(stmmac_cleanup_module); - -MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver"); +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 9c3b9d5c341..51f44123396 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, */ static int stmmac_mdio_reset(struct mii_bus *bus) { +#if defined(CONFIG_STMMAC_PLATFORM) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; @@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus) * on MDC, so perform a dummy mdio read. */ writel(0, priv->ioaddr + mii_address); - +#endif return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c new file mode 100644 index 00000000000..54a819a3648 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -0,0 +1,221 @@ +/******************************************************************************* + This contains the functions to handle the pci driver. + + Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/pci.h> +#include "stmmac.h" + +struct plat_stmmacenet_data plat_dat; +struct stmmac_mdio_bus_data mdio_data; + +static void stmmac_default_data(void) +{ + memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); + plat_dat.bus_id = 1; + plat_dat.phy_addr = 0; + plat_dat.interface = PHY_INTERFACE_MODE_GMII; + plat_dat.pbl = 32; + plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat_dat.has_gmac = 1; + plat_dat.force_sf_dma_mode = 1; + + mdio_data.bus_id = 1; + mdio_data.phy_reset = NULL; + mdio_data.phy_mask = 0; + plat_dat.mdio_bus_data = &mdio_data; +} + +/** + * stmmac_pci_probe + * + * @pdev: pci device pointer + * @id: pointer to table of device id/id's. + * + * Description: This probing function gets called for all PCI devices which + * match the ID table and are not "owned" by other driver yet. 
This function + * gets passed a "struct pci_dev *" for each device whose entry in the ID table + * matches the device. The probe function returns zero when the driver chooses + * to take "ownership" of the device, or a negative error code otherwise. + */ +static int __devinit stmmac_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret = 0; + void __iomem *addr = NULL; + struct stmmac_priv *priv = NULL; + int i; + + /* Enable pci device */ + ret = pci_enable_device(pdev); + if (ret) { + pr_err("%s : ERROR: failed to enable %s device\n", __func__, + pci_name(pdev)); + return ret; + } + if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) { + pr_err("%s: ERROR: failed to get PCI region\n", __func__); + ret = -ENODEV; + goto err_out_req_reg_failed; + } + + /* Get the base address of device */ + for (i = 0; i <= 5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + addr = pci_iomap(pdev, i, 0); + if (addr == NULL) { + pr_err("%s: ERROR: cannot map register memory, aborting", + __func__); + ret = -EIO; + goto err_out_map_failed; + } + break; + } + pci_set_master(pdev); + + stmmac_default_data(); + + priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat); + if (!priv) { + pr_err("%s: main driver probe failed", __func__); + goto err_out; + } + priv->ioaddr = addr; + priv->dev->base_addr = (unsigned long)addr; + priv->dev->irq = pdev->irq; + priv->wol_irq = pdev->irq; + + pci_set_drvdata(pdev, priv->dev); + + pr_debug("STMMAC PCI driver registration completed"); + + return 0; + +err_out: + pci_clear_master(pdev); +err_out_map_failed: + pci_release_regions(pdev); +err_out_req_reg_failed: + pci_disable_device(pdev); + + return ret; +} + +/** + * stmmac_pci_remove + * + * @pdev: pci device pointer + * Description: this function calls the main driver to free the net resources + * and releases the PCI resources. + */ +static void __devexit stmmac_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_dvr_remove(ndev); + + pci_set_drvdata(pdev, NULL); + pci_iounmap(pdev, priv->ioaddr); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + int ret; + + ret = stmmac_suspend(ndev); + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return ret; +} + +static int stmmac_pci_resume(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + return stmmac_resume(ndev); +} +#endif + +#define STMMAC_VENDOR_ID 0x700 +#define STMMAC_DEVICE_ID 0x1108 + +static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { + { + PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, { + } +}; + +MODULE_DEVICE_TABLE(pci, stmmac_id_table); + +static struct pci_driver stmmac_driver = { + .name = STMMAC_RESOURCE_NAME, + .id_table = stmmac_id_table, + .probe = stmmac_pci_probe, + .remove = __devexit_p(stmmac_pci_remove), +#ifdef CONFIG_PM + .suspend = stmmac_pci_suspend, + .resume = stmmac_pci_resume, +#endif +}; + +/** + * stmmac_init_module - Entry point for the driver + * Description: This function is the entry point for the driver.
+ */ +static int __init stmmac_init_module(void) +{ + int ret; + + ret = pci_register_driver(&stmmac_driver); + if (ret < 0) + pr_err("%s: ERROR: driver registration failed\n", __func__); + + return ret; +} + +/** + * stmmac_cleanup_module - Cleanup routine for the driver + * Description: This function is the cleanup routine for the driver. + */ +static void __exit stmmac_cleanup_module(void) +{ + pci_unregister_driver(&stmmac_driver); +} + +module_init(stmmac_init_module); +module_exit(stmmac_cleanup_module); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); +MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); +MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c new file mode 100644 index 00000000000..7b1594f4944 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -0,0 +1,198 @@ +/******************************************************************************* + This contains the functions to handle the platform driver. + + Copyright (C) 2007-2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/platform_device.h> +#include <linux/io.h> +#include "stmmac.h" + +/** + * stmmac_pltfr_probe + * @pdev: platform device pointer + * Description: platform_device probe function. It allocates + * the necessary resources and invokes the main to init + * the net device, register the mdio bus etc. 
+ */ +static int stmmac_pltfr_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + void __iomem *addr = NULL; + struct stmmac_priv *priv = NULL; + struct plat_stmmacenet_data *plat_dat; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + pr_err("%s: ERROR: memory allocation failed, " + "cannot get the I/O addr 0x%x\n", + __func__, (unsigned int)res->start); + return -EBUSY; + } + + addr = ioremap(res->start, resource_size(res)); + if (!addr) { + pr_err("%s: ERROR: memory mapping failed", __func__); + ret = -ENOMEM; + goto out_release_region; + } + plat_dat = pdev->dev.platform_data; + priv = stmmac_dvr_probe(&(pdev->dev), plat_dat); + if (!priv) { + pr_err("%s: main driver probe failed", __func__); + ret = -ENODEV; + goto out_unmap; + } + + priv->ioaddr = addr; + /* Set the I/O base addr */ + priv->dev->base_addr = (unsigned long)addr; + + /* Get the MAC information */ + priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); + if (priv->dev->irq == -ENXIO) { + pr_err("%s: ERROR: MAC IRQ configuration " + "information not found\n", __func__); + ret = -ENXIO; + goto out_unmap; + } + + /* + * On some platforms, e.g. SPEAr, the wake up irq differs from the mac irq. + * The external wake up irq can be passed through the platform code, + * named as "eth_wake_irq". + * + * In case the wake up interrupt is not passed from the platform, + * the driver will continue to use the mac irq (ndev->irq). + */ + priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (priv->wol_irq == -ENXIO) + priv->wol_irq = priv->dev->irq; + + platform_set_drvdata(pdev, priv->dev); + + /* Custom initialisation */ + if (priv->plat->init) { + ret = priv->plat->init(pdev); + if (unlikely(ret)) + goto out_unmap; + } + + pr_debug("STMMAC platform driver registration completed"); + + return 0; + +out_unmap: + iounmap(addr); + platform_set_drvdata(pdev, NULL); + +out_release_region: + release_mem_region(res->start, resource_size(res)); + + return ret; +} + +/** + * stmmac_pltfr_remove + * @pdev: platform device pointer + * Description: this function calls the main driver to free the net resources + * and calls the platform's exit hook to release the resources (e.g. mem).
+ */ +static int stmmac_pltfr_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + struct resource *res; + int ret = stmmac_dvr_remove(ndev); + + if (priv->plat->exit) + priv->plat->exit(pdev); + + platform_set_drvdata(pdev, NULL); + + iounmap((void *)priv->ioaddr); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + return ret; +} + +#ifdef CONFIG_PM +static int stmmac_pltfr_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return stmmac_suspend(ndev); +} + +static int stmmac_pltfr_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return stmmac_resume(ndev); +} + +int stmmac_pltfr_freeze(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return stmmac_freeze(ndev); +} + +int stmmac_pltfr_restore(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return stmmac_restore(ndev); +} + +static const struct dev_pm_ops stmmac_pltfr_pm_ops = { + .suspend = stmmac_pltfr_suspend, + .resume = stmmac_pltfr_resume, + .freeze = stmmac_pltfr_freeze, + .thaw = stmmac_pltfr_restore, + .restore = stmmac_pltfr_restore, +}; +#else +static const struct dev_pm_ops stmmac_pltfr_pm_ops; +#endif /* CONFIG_PM */ + +static struct platform_driver stmmac_driver = { + .probe = stmmac_pltfr_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = STMMAC_RESOURCE_NAME, + .owner = THIS_MODULE, + .pm = &stmmac_pltfr_pm_ops, + }, +}; + +module_platform_driver(stmmac_driver); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); +MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index fd40988c19a..f10665f594c 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev) static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct cas *cp = netdev_priv(dev); - strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); - info->fw_version[0] = '\0'; - strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
cp->casreg_len : CAS_MAX_REGS; info->n_stats = CAS_NUM_STAT_KEYS; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 73c708107a3..cf433931304 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p) supported |= SUPPORTED_1000baseT_Full; lp->supported = supported; - advertising = 0; - if (advert & ADVERTISE_10HALF) - advertising |= ADVERTISED_10baseT_Half; - if (advert & ADVERTISE_10FULL) - advertising |= ADVERTISED_10baseT_Full; - if (advert & ADVERTISE_100HALF) - advertising |= ADVERTISED_100baseT_Half; - if (advert & ADVERTISE_100FULL) - advertising |= ADVERTISED_100baseT_Full; - if (ctrl1000 & ADVERTISE_1000HALF) - advertising |= ADVERTISED_1000baseT_Half; - if (ctrl1000 & ADVERTISE_1000FULL) - advertising |= ADVERTISED_1000baseT_Full; + advertising = mii_adv_to_ethtool_adv_t(advert); + advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { int neg, neg1000; @@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) { struct netdev_queue *txq; + unsigned int tx_bytes; u16 pkt_cnt, tmp; int cons, index; u64 cs; @@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) netif_printk(np, tx_done, KERN_DEBUG, np->dev, "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); - while (pkt_cnt--) + tx_bytes = 0; + tmp = pkt_cnt; + while (tmp--) { + tx_bytes += rp->tx_buffs[cons].skb->len; cons = release_tx_packet(np, rp, cons); + } rp->cons = cons; smp_mb(); + netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); + out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { @@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np) struct tx_ring_info *rp = &np->tx_rings[i]; niu_free_tx_ring_info(np, rp); + netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); } kfree(np->tx_rings); np->tx_rings = NULL; @@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, prod = NEXT_TX(rp, prod); } + netdev_tx_sent_queue(txq, skb->len); + if (prod < rp->prod) rp->wrap_bit ^= TX_RING_KICK_WRAP; rp->prod = prod; @@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev, struct niu *np = netdev_priv(dev); struct niu_vpd *vpd = &np->vpd; - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - sprintf(info->fw_version, "%d.%d", + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", vpd->fcode_major, vpd->fcode_minor); if (np->parent->plat_type != PLAT_TYPE_NIU) - strcpy(info->bus_info, pci_name(np->pdev)); + strlcpy(info->bus_info, pci_name(np->pdev), + sizeof(info->bus_info)); } static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent, if (dev_id_1 < 0 || dev_id_2 < 0) return 0; if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { + /* Because of the NIU_PHY_ID_MASK being applied, the 8704 + * test covers the 8706 as well. 
+ */ if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && - ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && - ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) + ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) return 0; } else { if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 0d8cfd9ea05..220f724c337 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = { .remove = __devexit_p(bigmac_sbus_remove), }; -static int __init bigmac_init(void) -{ - return platform_driver_register(&bigmac_sbus_driver); -} - -static void __exit bigmac_exit(void) -{ - platform_driver_unregister(&bigmac_sbus_driver); -} - -module_init(bigmac_init); -module_exit(bigmac_exit); +module_platform_driver(bigmac_sbus_driver); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index ceab215bb4a..31441a870b0 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct gem *gp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(gp->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); } static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index cf14ab9db57..09c518655db 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct happy_meal *hp = netdev_priv(dev); - strcpy(info->driver, "sunhme"); - strcpy(info->version, "2.02"); + strlcpy(info->driver, "sunhme", sizeof(info->driver)); + strlcpy(info->version, "2.02", sizeof(info->version)); if (hp->happy_flags & HFLAG_PCI) { struct pci_dev *pdev = hp->happy_dev; - strcpy(info->bus_info, pci_name(pdev)); + strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); } #ifdef CONFIG_SBUS else { @@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info struct platform_device *op = hp->happy_dev; regs = of_get_property(op->dev.of_node, "regs", NULL); if (regs) - sprintf(info->bus_info, "SBUS:%d", + snprintf(info->bus_info, sizeof(info->bus_info), + "SBUS:%d", regs->which_io); } #endif @@ -2849,7 +2850,7 @@ err_out: static int is_quattro_p(struct pci_dev *pdev) { struct pci_dev *busdev = pdev->bus->self; - struct list_head *tmp; + struct pci_dev *this_pdev; int n_hmes; if (busdev == NULL || @@ -2858,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev) return 0; n_hmes = 0; - tmp = pdev->bus->devices.next; - while (tmp != &pdev->bus->devices) { - struct pci_dev *this_pdev = pci_dev_b(tmp); - + list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) { if (this_pdev->vendor == PCI_VENDOR_ID_SUN && this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) n_hmes++; - - tmp = tmp->next; } if (n_hmes != 4) diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 3a90af6d111..4b19e9b0606 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ 
b/drivers/net/ethernet/tehuti/tehuti.c @@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) * @ndev network device * @vid VLAN vid to add */ -static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) +static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) { __bdx_vlan_rx_vid(ndev, vid, 1); + return 0; } /* @@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) * @ndev network device * @vid VLAN vid to kill */ -static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) +static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) { __bdx_vlan_rx_vid(ndev, vid, 0); + return 0; } /** diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 815c7970261..794ac30a577 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -115,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_NUM_DESC (128) +#define EMAC_DEF_TX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ @@ -336,6 +337,7 @@ struct emac_priv { u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; + atomic_t cur_tx; const char *phy_id; struct phy_device *phydev; spinlock_t lock; @@ -1044,6 +1046,9 @@ static void emac_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; + struct emac_priv *priv = netdev_priv(ndev); + + atomic_dec(&priv->cur_tx); if (unlikely(netif_queue_stopped(ndev))) netif_start_queue(ndev); @@ -1092,6 +1097,9 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) goto fail_tx; } + if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC) + netif_stop_queue(ndev); + return NETDEV_TX_OK; fail_tx: diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 1187a1169eb..6b75063988e 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev) sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) panic("Failed to stop LIPP/LEPP!\n"); - priv->partly_opened = 0; + priv->partly_opened = false; } @@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev) priv->network_cpus_count, priv->network_cpus_credits); #endif - priv->partly_opened = 1; + priv->partly_opened = true; } else { /* FIXME: Is this possible? 
*/ diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index a8df7eca095..a9ce01bafd2 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1688,18 +1688,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr) mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL); } -static int tsi108_ether_init(void) -{ - int ret; - ret = platform_driver_register (&tsi_eth_driver); - if (ret < 0){ - printk("tsi108_ether_init: error initializing ethernet " - "device\n"); - return ret; - } - return 0; -} - static int tsi108_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -1714,13 +1702,7 @@ static int tsi108_ether_remove(struct platform_device *pdev) return 0; } -static void tsi108_ether_exit(void) -{ - platform_driver_unregister(&tsi_eth_driver); -} - -module_init(tsi108_ether_init); -module_exit(tsi108_ether_exit); +module_platform_driver(tsi_eth_driver); MODULE_AUTHOR("Tundra Semiconductor Corporation"); MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver"); diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index f34dd99fe57..5c4983b2870 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -35,6 +35,7 @@ #define DRV_VERSION "1.5.0" #define DRV_RELDATE "2010-10-09" +#include <linux/types.h> /* A few user-configurable values. These may be modified when a driver module is loaded. */ @@ -55,7 +56,7 @@ static int rx_copybreak; /* Work-around for broken BIOSes: they are unable to get the chip back out of power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ -static int avoid_D3; +static bool avoid_D3; /* * In case you are looking for 'options[]' or 'full_duplex[]', they @@ -488,8 +489,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); static void rhine_shutdown (struct pci_dev *pdev); -static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); -static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); +static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); +static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr); static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr); static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask); @@ -1261,7 +1262,7 @@ static void rhine_update_vcam(struct net_device *dev) rhine_set_vlan_cam_mask(ioaddr, vCAMmask); } -static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct rhine_private *rp = netdev_priv(dev); @@ -1269,9 +1270,10 @@ static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) set_bit(vid, rp->active_vlans); rhine_update_vcam(dev); spin_unlock_irq(&rp->lock); + return 0; } -static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct rhine_private *rp = netdev_priv(dev); @@ -1279,6 +1281,7 @@ static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, rp->active_vlans); rhine_update_vcam(dev); spin_unlock_irq(&rp->lock); + return 0; } static void 
init_registers(struct net_device *dev) @@ -2009,9 +2012,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i { struct rhine_private *rp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(rp->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -2320,7 +2323,7 @@ static int __init rhine_init(void) #endif if (dmi_check_system(rhine_dmi_table)) { /* these BIOSes fail at PXE boot if chip is in D3 */ - avoid_D3 = 1; + avoid_D3 = true; pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); } else if (avoid_D3) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 4535d7cc848..4128d6b8cc2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) mac_set_vlan_cam_mask(regs, vptr->vCAMmask); } -static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct velocity_info *vptr = netdev_priv(dev); @@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) set_bit(vid, vptr->active_vlans); velocity_init_cam_filter(vptr); spin_unlock_irq(&vptr->lock); + return 0; } -static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct velocity_info *vptr = netdev_priv(dev); @@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid clear_bit(vid, vptr->active_vlans); velocity_init_cam_filter(vptr); spin_unlock_irq(&vptr->lock); + return 0; } static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) @@ -3270,9 +3272,9 @@ static int velocity_set_settings(struct net_device *dev, static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct velocity_info *vptr = netdev_priv(dev); - strcpy(info->driver, VELOCITY_NAME); - strcpy(info->version, VELOCITY_VERSION); - strcpy(info->bus_info, pci_name(vptr->pdev)); + strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); + strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info)); } static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 2681b53820e..f21addb1db9 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -237,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev) struct sk_buff *skb; int i; - lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); + lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL); if (!lp->rx_skb) { dev_err(&ndev->dev, "can't allocate memory for DMA RX buffer\n"); @@ -920,12 +920,26 @@ temac_poll_controller(struct net_device *ndev) } #endif +static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct temac_local *lp = netdev_priv(ndev); + + if 
(!netif_running(ndev)) + return -EINVAL; + + if (!lp->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(lp->phy_dev, rq, cmd); +} + static const struct net_device_ops temac_netdev_ops = { .ndo_open = temac_open, .ndo_stop = temac_stop, .ndo_start_xmit = temac_start_xmit, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = temac_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = temac_poll_controller, #endif @@ -1077,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op) of_node_put(np); /* Finished with the DMA node; drop the reference */ - if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { + if (!lp->rx_irq || !lp->tx_irq) { dev_err(&op->dev, "could not determine irqs\n"); rc = -ENOMEM; goto err_iounmap_2; @@ -1167,17 +1181,7 @@ static struct platform_driver temac_of_driver = { }, }; -static int __init temac_init(void) -{ - return platform_driver_register(&temac_of_driver); -} -module_init(temac_init); - -static void __exit temac_exit(void) -{ - platform_driver_unregister(&temac_of_driver); -} -module_exit(temac_exit); +module_platform_driver(temac_of_driver); MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver"); MODULE_AUTHOR("Yoshio Kashiwagi"); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 8018d7d045b..79013e5731a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev) */ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) { - bool tx_complete = 0; + bool tx_complete = false; struct net_device *dev = dev_id; struct net_local *lp = netdev_priv(dev); void __iomem *base_addr = lp->base_addr; @@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; out_be32(base_addr + XEL_TSR_OFFSET, tx_status); - tx_complete = 1; + tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ @@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, tx_status); - tx_complete = 1; + tx_complete = true; } /* If there was a Tx interrupt, call the Tx Handler */ @@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev) /* Get IRQ for the device */ rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); - if (rc == NO_IRQ) { + if (!rc) { dev_err(dev, "no IRQ found\n"); return rc; } @@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = { .remove = __devexit_p(xemaclite_of_remove), }; -/** - * xgpiopss_init - Initial driver registration call - * - * Return: 0 upon success, or a negative error upon failure. 
- */ -static int __init xemaclite_init(void) -{ - /* No kernel boot options used, we just need to register the driver */ - return platform_driver_register(&xemaclite_of_driver); -} - -/** - * xemaclite_cleanup - Driver un-registration call - */ -static void __exit xemaclite_cleanup(void) -{ - platform_driver_unregister(&xemaclite_of_driver); -} - -module_init(xemaclite_init); -module_exit(xemaclite_cleanup); +module_platform_driver(xemaclite_of_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver"); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index bbe8b7dbf3f..33979c3ac94 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1411,7 +1411,7 @@ do_open(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, "xirc2ps_cs"); + strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver)); sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); } diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 46b5f5fd686..e05b645bbc3 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ +#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX) diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index 9d4ce1aba10..a561ae44a9a 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = { }, }; -static int __init bfin_sir_init(void) -{ - return platform_driver_register(&bfin_ir_driver); -} - -static void __exit bfin_sir_exit(void) -{ - platform_driver_unregister(&bfin_ir_driver); -} - -module_init(bfin_sir_init); -module_exit(bfin_sir_exit); +module_platform_driver(bfin_ir_driver); module_param(max_rate, int, 0); MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)"); diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index b45b2cc4280..64f403da101 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME; static int max_baud = 4000000; #ifdef USE_PROBE -static int do_probe = 0; +static bool do_probe = false; #endif diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index d0851dfa037..81d5275a15e 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = { .resume = pxa_irda_resume, }; -static int __init pxa_irda_init(void) -{ - return platform_driver_register(&pxa_ir_driver); -} - -static void __exit pxa_irda_exit(void) -{ - platform_driver_unregister(&pxa_ir_driver); -} - -module_init(pxa_irda_init); -module_exit(pxa_irda_exit); +module_platform_driver(pxa_ir_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-ir"); diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index d275e276e74..725d6b36782 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = { }, }; -static int __init sh_irda_init(void) -{ - return platform_driver_register(&sh_irda_driver); -} - 
-static void __exit sh_irda_exit(void) -{ - platform_driver_unregister(&sh_irda_driver); -} - -module_init(sh_irda_init); -module_exit(sh_irda_exit); +module_platform_driver(sh_irda_driver); MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); MODULE_DESCRIPTION("SuperH IrDA driver"); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index ed7d7d62bf6..e6661b5c1f8 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = { }, }; -static int __init sh_sir_init(void) -{ - return platform_driver_register(&sh_sir_driver); -} - -static void __exit sh_sir_exit(void) -{ - platform_driver_unregister(&sh_sir_driver); -} - -module_init(sh_sir_init); -module_exit(sh_sir_exit); +module_platform_driver(sh_sir_driver); MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>"); MODULE_DESCRIPTION("SuperH IrDA driver"); diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 8b1c3484d27..6c95d4087b2 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>"); MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver"); MODULE_LICENSE("GPL"); -static int smsc_nopnp = 1; +static bool smsc_nopnp = true; module_param_named(nopnp, smsc_nopnp, bool, 0); MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true"); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 4ce9e5f2c06..b71998d0b5b 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev) dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | NETIF_F_UFO - | NETIF_F_NO_CSUM + | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 74134970b70..f2f820c4b40 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -26,6 +26,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_arp.h> +#include <linux/if_vlan.h> #include <linux/if_link.h> #include <linux/if_macvlan.h> #include <net/rtnetlink.h> @@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, return stats; } -static void macvlan_vlan_rx_add_vid(struct net_device *dev, +static int macvlan_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - const struct net_device_ops *ops = lowerdev->netdev_ops; - if (ops->ndo_vlan_rx_add_vid) - ops->ndo_vlan_rx_add_vid(lowerdev, vid); + return vlan_vid_add(lowerdev, vid); } -static void macvlan_vlan_rx_kill_vid(struct net_device *dev, +static int macvlan_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - const struct net_device_ops *ops = lowerdev->netdev_ops; - if (ops->ndo_vlan_rx_kill_vid) - ops->ndo_vlan_rx_kill_vid(lowerdev, vid); + vlan_vid_del(lowerdev, vid); + return 0; } static void macvlan_ethtool_get_drvinfo(struct net_device *dev, diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 1b7082d08f3..58dc117a8d7 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q) if (vlan) { int index = get_slot(vlan, q); - 
rcu_assign_pointer(vlan->taps[index], NULL); - rcu_assign_pointer(q->vlan, NULL); + RCU_INIT_POINTER(vlan->taps[index], NULL); + RCU_INIT_POINTER(q->vlan, NULL); sock_put(&q->sk); --vlan->numvtaps; } @@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, if (!numvtaps) goto out; + /* Check if we can use flow to select a queue */ + rxq = skb_get_rxhash(skb); + if (rxq) { + tap = rcu_dereference(vlan->taps[rxq % numvtaps]); + if (tap) + goto out; + } + if (likely(skb_rx_queue_recorded(skb))) { rxq = skb_get_rx_queue(skb); @@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, goto out; } - /* Check if we can use flow to select a queue */ - rxq = skb_get_rxhash(skb); - if (rxq) { - tap = rcu_dereference(vlan->taps[rxq % numvtaps]); - if (tap) - goto out; - } - /* Everything failed - find first available queue */ for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) { tap = rcu_dereference(vlan->taps[rxq]); @@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev) lockdep_is_held(&macvtap_lock)); if (q) { qlist[j++] = q; - rcu_assign_pointer(vlan->taps[i], NULL); - rcu_assign_pointer(q->vlan, NULL); + RCU_INIT_POINTER(vlan->taps[i], NULL); + RCU_INIT_POINTER(q->vlan, NULL); vlan->numvtaps--; } } diff --git a/drivers/net/mii.c b/drivers/net/mii.c index c62e7816d54..c70c2332d15 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -35,26 +35,11 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { - u32 result = 0; int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); - if (advert & LPA_LPACK) - result |= ADVERTISED_Autoneg; - if (advert & ADVERTISE_10HALF) - result |= ADVERTISED_10baseT_Half; - if (advert & ADVERTISE_10FULL) - result |= ADVERTISED_10baseT_Full; - if (advert & ADVERTISE_100HALF) - result |= ADVERTISED_100baseT_Half; - if (advert & ADVERTISE_100FULL) - result |= ADVERTISED_100baseT_Full; - if (advert & ADVERTISE_PAUSE_CAP) - result |= ADVERTISED_Pause; - if (advert & ADVERTISE_PAUSE_ASYM) - result |= ADVERTISED_Asym_Pause; - - return result; + + return mii_lpa_to_ethtool_lpa_t(advert); } /** @@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); - if (ctrl1000 & ADVERTISE_1000HALF) - ecmd->advertising |= ADVERTISED_1000baseT_Half; - if (ctrl1000 & ADVERTISE_1000FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (mii->supports_gmii) + ecmd->advertising |= + mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); - if (stat1000 & LPA_1000HALF) - ecmd->lp_advertising |= - ADVERTISED_1000baseT_Half; - if (stat1000 & LPA_1000FULL) - ecmd->lp_advertising |= - ADVERTISED_1000baseT_Full; + ecmd->lp_advertising |= + mii_stat1000_to_ethtool_lpa_t(stat1000); } else { ecmd->lp_advertising = 0; } @@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } - if (ecmd->advertising & ADVERTISED_10baseT_Half) - tmp |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) - tmp |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) - tmp |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) - tmp |= ADVERTISE_100FULL; - if (mii->supports_gmii) { - if (ecmd->advertising & 
ADVERTISED_1000baseT_Half) - tmp2 |= ADVERTISE_1000HALF; - if (ecmd->advertising & ADVERTISED_1000baseT_Full) - tmp2 |= ADVERTISE_1000FULL; - } + tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); + + if (mii->supports_gmii) + tmp2 |= + ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index a70244306c9..fbdcdf83cbf 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -131,3 +131,7 @@ config MDIO_OCTEON If in doubt, say Y. endif # PHYLIB + +config MICREL_KS8995MA + tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch" + depends on SPI diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 2333215bbb3..e15c83fecbe 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o +obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 65391891d8c..daec9b05d16 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) return 0; } +static int mdiobb_reset(struct mii_bus *bus) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + if (ctrl->reset) + ctrl->reset(bus); + return 0; +} + struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; @@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) bus->read = mdiobb_read; bus->write = mdiobb_write; + bus->reset = mdiobb_reset; bus->priv = ctrl; return bus; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 2843c90f712..89c5a3eccc1 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, goto out; bitbang->ctrl.ops = &mdio_gpio_ops; + bitbang->ctrl.reset = pdata->reset; bitbang->mdc = pdata->mdc; bitbang->mdio = pdata->mdio; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 83a5a5afec6..f320f466f03 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev) if (adv < 0) return adv; - adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (advertise & ADVERTISED_10baseT_Half) - adv |= ADVERTISE_10HALF; - if (advertise & ADVERTISED_10baseT_Full) - adv |= ADVERTISE_10FULL; - if (advertise & ADVERTISED_100baseT_Half) - adv |= ADVERTISE_100HALF; - if (advertise & ADVERTISED_100baseT_Full) - adv |= ADVERTISE_100FULL; - if (advertise & ADVERTISED_Pause) - adv |= ADVERTISE_PAUSE_CAP; - if (advertise & ADVERTISED_Asym_Pause) - adv |= ADVERTISE_PAUSE_ASYM; + adv |= ethtool_adv_to_mii_adv_t(advertise); if (adv != oldadv) { err = phy_write(phydev, MII_ADVERTISE, adv); @@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev) return adv; adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - if (advertise & SUPPORTED_1000baseT_Half) - adv |= ADVERTISE_1000HALF; - if (advertise & SUPPORTED_1000baseT_Full) - adv |= ADVERTISE_1000FULL; + adv |= 
ethtool_adv_to_mii_ctrl1000_t(advertise); if (adv != oldadv) { err = phy_write(phydev, MII_CTRL1000, adv); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 342505c976d..fc3e7e96c88 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -22,26 +22,7 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/netdevice.h> - -#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ -#define MII_LAN83C185_IM 30 /* Interrupt Mask */ -#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ - -#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ -#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ -#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ -#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ -#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ -#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ -#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ - -#define MII_LAN83C185_ISF_INT_ALL (0x0e) - -#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ - (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ - MII_LAN83C185_ISF_INT7) - -#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ +#include <linux/smscphy.h> static int smsc_phy_config_intr(struct phy_device *phydev) { diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c new file mode 100644 index 00000000000..116a2dd7c87 --- /dev/null +++ b/drivers/net/phy/spi_ks8995.c @@ -0,0 +1,375 @@ +/* + * SPI driver for Micrel/Kendin KS8995M ethernet switch + * + * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org> + * + * This file was based on: drivers/spi/at25.c + * Copyright (C) 2006 David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/device.h> + +#include <linux/spi/spi.h> + +#define DRV_VERSION "0.1.1" +#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver" + +/* ------------------------------------------------------------------------ */ + +#define KS8995_REG_ID0 0x00 /* Chip ID0 */ +#define KS8995_REG_ID1 0x01 /* Chip ID1 */ + +#define KS8995_REG_GC0 0x02 /* Global Control 0 */ +#define KS8995_REG_GC1 0x03 /* Global Control 1 */ +#define KS8995_REG_GC2 0x04 /* Global Control 2 */ +#define KS8995_REG_GC3 0x05 /* Global Control 3 */ +#define KS8995_REG_GC4 0x06 /* Global Control 4 */ +#define KS8995_REG_GC5 0x07 /* Global Control 5 */ +#define KS8995_REG_GC6 0x08 /* Global Control 6 */ +#define KS8995_REG_GC7 0x09 /* Global Control 7 */ +#define KS8995_REG_GC8 0x0a /* Global Control 8 */ +#define KS8995_REG_GC9 0x0b /* Global Control 9 */ + +#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */ +#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */ + +#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */ +#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */ +#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */ +#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */ +#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */ +#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */ +#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */ +#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */ + +#define KS8995_REG_MAC0 0x68 /* MAC address 0 */ +#define KS8995_REG_MAC1 0x69 /* MAC address 1 */ +#define KS8995_REG_MAC2 0x6a /* MAC address 2 */ +#define KS8995_REG_MAC3 0x6b /* MAC address 3 */ +#define KS8995_REG_MAC4 0x6c /* MAC address 4 */ +#define KS8995_REG_MAC5 0x6d /* MAC address 5 */ + +#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */ +#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 0 */ +#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */ +#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */ +#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */ +#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */ +#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */ +#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */ +#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */ +#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */ + +#define KS8995_REGS_SIZE 0x80 + +#define ID1_CHIPID_M 0xf +#define ID1_CHIPID_S 4 +#define ID1_REVISION_M 0x7 +#define ID1_REVISION_S 1 +#define ID1_START_SW 1 /* start the switch */ + +#define FAMILY_KS8995 0x95 +#define CHIPID_M 0 + +#define KS8995_CMD_WRITE 0x02U +#define KS8995_CMD_READ 0x03U + +#define KS8995_RESET_DELAY 10 /* usec */ + +struct ks8995_pdata { + /* not yet implemented */ +}; + +struct ks8995_switch { + struct spi_device *spi; + struct mutex lock; + struct ks8995_pdata *pdata; +}; + +static inline u8 get_chip_id(u8 val) +{ + return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; +} + +static inline u8 get_chip_rev(u8 val) +{ + return (val >> ID1_REVISION_S) & ID1_REVISION_M; +} + +/* ------------------------------------------------------------------------ */ +static int ks8995_read(struct ks8995_switch *ks, char *buf, + unsigned offset, size_t count) +{ + u8 cmd[2]; + struct spi_transfer t[2]; + struct spi_message m; + int err; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + 
spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = count; + spi_message_add_tail(&t[1], &m); + + cmd[0] = KS8995_CMD_READ; + cmd[1] = offset; + + mutex_lock(&ks->lock); + err = spi_sync(ks->spi, &m); + mutex_unlock(&ks->lock); + + return err ? err : count; +} + + +static int ks8995_write(struct ks8995_switch *ks, char *buf, + unsigned offset, size_t count) +{ + u8 cmd[2]; + struct spi_transfer t[2]; + struct spi_message m; + int err; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = count; + spi_message_add_tail(&t[1], &m); + + cmd[0] = KS8995_CMD_WRITE; + cmd[1] = offset; + + mutex_lock(&ks->lock); + err = spi_sync(ks->spi, &m); + mutex_unlock(&ks->lock); + + return err ? err : count; +} + +static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf) +{ + return (ks8995_read(ks, buf, addr, 1) != 1); +} + +static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val) +{ + char buf = val; + + return (ks8995_write(ks, &buf, addr, 1) != 1); +} + +/* ------------------------------------------------------------------------ */ + +static int ks8995_stop(struct ks8995_switch *ks) +{ + return ks8995_write_reg(ks, KS8995_REG_ID1, 0); +} + +static int ks8995_start(struct ks8995_switch *ks) +{ + return ks8995_write_reg(ks, KS8995_REG_ID1, 1); +} + +static int ks8995_reset(struct ks8995_switch *ks) +{ + int err; + + err = ks8995_stop(ks); + if (err) + return err; + + udelay(KS8995_RESET_DELAY); + + return ks8995_start(ks); +} + +/* ------------------------------------------------------------------------ */ + +static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) +{ + struct device *dev; + struct ks8995_switch *ks8995; + + dev = container_of(kobj, struct device, kobj); + ks8995 = dev_get_drvdata(dev); + + if (unlikely(off > KS8995_REGS_SIZE)) + return 0; + + if ((off + count) > KS8995_REGS_SIZE) + count = KS8995_REGS_SIZE - off; + + if (unlikely(!count)) + return count; + + return ks8995_read(ks8995, buf, off, count); +} + + +static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) +{ + struct device *dev; + struct ks8995_switch *ks8995; + + dev = container_of(kobj, struct device, kobj); + ks8995 = dev_get_drvdata(dev); + + if (unlikely(off >= KS8995_REGS_SIZE)) + return -EFBIG; + + if ((off + count) > KS8995_REGS_SIZE) + count = KS8995_REGS_SIZE - off; + + if (unlikely(!count)) + return count; + + return ks8995_write(ks8995, buf, off, count); +} + + +static struct bin_attribute ks8995_registers_attr = { + .attr = { + .name = "registers", + .mode = S_IRUSR | S_IWUSR, + }, + .size = KS8995_REGS_SIZE, + .read = ks8995_registers_read, + .write = ks8995_registers_write, +}; + +/* ------------------------------------------------------------------------ */ + +static int __devinit ks8995_probe(struct spi_device *spi) +{ + struct ks8995_switch *ks; + struct ks8995_pdata *pdata; + u8 ids[2]; + int err; + + /* Chip description */ + pdata = spi->dev.platform_data; + + ks = kzalloc(sizeof(*ks), GFP_KERNEL); + if (!ks) { + dev_err(&spi->dev, "no memory for private data\n"); + return -ENOMEM; + } + + mutex_init(&ks->lock); + ks->pdata = pdata; + ks->spi = spi_dev_get(spi); + dev_set_drvdata(&spi->dev, ks); + + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; 
+ err = spi_setup(spi); + if (err) { + dev_err(&spi->dev, "spi_setup failed, err=%d\n", err); + goto err_drvdata; + } + + err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids)); + if (err < 0) { + dev_err(&spi->dev, "unable to read id registers, err=%d\n", + err); + goto err_drvdata; + } + + switch (ids[0]) { + case FAMILY_KS8995: + break; + default: + dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]); + err = -ENODEV; + goto err_drvdata; + } + + err = ks8995_reset(ks); + if (err) + goto err_drvdata; + + err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr); + if (err) { + dev_err(&spi->dev, "unable to create sysfs file, err=%d\n", + err); + goto err_drvdata; + } + + dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, " + "Revision:%01x\n", ids[0], + get_chip_id(ids[1]), get_chip_rev(ids[1])); + + return 0; + +err_drvdata: + dev_set_drvdata(&spi->dev, NULL); + kfree(ks); + return err; +} + +static int __devexit ks8995_remove(struct spi_device *spi) +{ + struct ks8995_data *ks8995; + + ks8995 = dev_get_drvdata(&spi->dev); + sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr); + + dev_set_drvdata(&spi->dev, NULL); + kfree(ks8995); + + return 0; +} + +/* ------------------------------------------------------------------------ */ + +static struct spi_driver ks8995_driver = { + .driver = { + .name = "spi-ks8995", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = ks8995_probe, + .remove = __devexit_p(ks8995_remove), +}; + +static int __init ks8995_init(void) +{ + printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n"); + + return spi_register_driver(&ks8995_driver); +} +module_init(ks8995_init); + +static void __exit ks8995_exit(void) +{ + spi_unregister_driver(&ks8995_driver); +} +module_exit(ks8995_exit); + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index f8a6853b692..c1c9293c2bb 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock) { spin_lock(&chan_lock); clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); - rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); + RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); spin_unlock(&chan_lock); synchronize_rcu(); } diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig new file mode 100644 index 00000000000..248a144033c --- /dev/null +++ b/drivers/net/team/Kconfig @@ -0,0 +1,43 @@ +menuconfig NET_TEAM + tristate "Ethernet team driver support (EXPERIMENTAL)" + depends on EXPERIMENTAL + ---help--- + This allows one to create virtual interfaces that teams together + multiple ethernet devices. + + Team devices can be added using the "ip" command from the + iproute2 package: + + "ip link add link [ address MAC ] [ NAME ] type team" + + To compile this driver as a module, choose M here: the module + will be called team. + +if NET_TEAM + +config NET_TEAM_MODE_ROUNDROBIN + tristate "Round-robin mode support" + depends on NET_TEAM + ---help--- + Basic mode where port used for transmitting packets is selected in + round-robin fashion using packet counter. + + All added ports are setup to have bond's mac address. + + To compile this team mode as a module, choose M here: the module + will be called team_mode_roundrobin. 
+ +config NET_TEAM_MODE_ACTIVEBACKUP + tristate "Active-backup mode support" + depends on NET_TEAM + ---help--- + Only one port is active at a time and the rest of ports are used + for backup. + + Mac addresses of ports are not modified. Userspace is responsible + to do so. + + To compile this team mode as a module, choose M here: the module + will be called team_mode_activebackup. + +endif # NET_TEAM diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile new file mode 100644 index 00000000000..85f2028a87a --- /dev/null +++ b/drivers/net/team/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the network team driver +# + +obj-$(CONFIG_NET_TEAM) += team.o +obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o +obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c new file mode 100644 index 00000000000..ed2a862b835 --- /dev/null +++ b/drivers/net/team/team.c @@ -0,0 +1,1684 @@ +/* + * net/drivers/team/team.c - Network team device driver + * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/rcupdate.h> +#include <linux/errno.h> +#include <linux/ctype.h> +#include <linux/notifier.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/if_arp.h> +#include <linux/socket.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <net/rtnetlink.h> +#include <net/genetlink.h> +#include <net/netlink.h> +#include <linux/if_team.h> + +#define DRV_NAME "team" + + +/********** + * Helpers + **********/ + +#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) + +static struct team_port *team_port_get_rcu(const struct net_device *dev) +{ + struct team_port *port = rcu_dereference(dev->rx_handler_data); + + return team_port_exists(dev) ? port : NULL; +} + +static struct team_port *team_port_get_rtnl(const struct net_device *dev) +{ + struct team_port *port = rtnl_dereference(dev->rx_handler_data); + + return team_port_exists(dev) ? 
port : NULL; +} + +/* + * Since the ability to change mac address for open port device is tested in + * team_port_add, this function can be called without control of return value + */ +static int __set_port_mac(struct net_device *port_dev, + const unsigned char *dev_addr) +{ + struct sockaddr addr; + + memcpy(addr.sa_data, dev_addr, ETH_ALEN); + addr.sa_family = ARPHRD_ETHER; + return dev_set_mac_address(port_dev, &addr); +} + +int team_port_set_orig_mac(struct team_port *port) +{ + return __set_port_mac(port->dev, port->orig.dev_addr); +} + +int team_port_set_team_mac(struct team_port *port) +{ + return __set_port_mac(port->dev, port->team->dev->dev_addr); +} +EXPORT_SYMBOL(team_port_set_team_mac); + + +/******************* + * Options handling + *******************/ + +struct team_option *__team_find_option(struct team *team, const char *opt_name) +{ + struct team_option *option; + + list_for_each_entry(option, &team->option_list, list) { + if (strcmp(option->name, opt_name) == 0) + return option; + } + return NULL; +} + +int team_options_register(struct team *team, + const struct team_option *option, + size_t option_count) +{ + int i; + struct team_option **dst_opts; + int err; + + dst_opts = kzalloc(sizeof(struct team_option *) * option_count, + GFP_KERNEL); + if (!dst_opts) + return -ENOMEM; + for (i = 0; i < option_count; i++, option++) { + if (__team_find_option(team, option->name)) { + err = -EEXIST; + goto rollback; + } + dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); + if (!dst_opts[i]) { + err = -ENOMEM; + goto rollback; + } + } + + for (i = 0; i < option_count; i++) + list_add_tail(&dst_opts[i]->list, &team->option_list); + + kfree(dst_opts); + return 0; + +rollback: + for (i = 0; i < option_count; i++) + kfree(dst_opts[i]); + + kfree(dst_opts); + return err; +} + +EXPORT_SYMBOL(team_options_register); + +static void __team_options_change_check(struct team *team, + struct team_option *changed_option); + +static void __team_options_unregister(struct team *team, + const struct team_option *option, + size_t option_count) +{ + int i; + + for (i = 0; i < option_count; i++, option++) { + struct team_option *del_opt; + + del_opt = __team_find_option(team, option->name); + if (del_opt) { + list_del(&del_opt->list); + kfree(del_opt); + } + } +} + +void team_options_unregister(struct team *team, + const struct team_option *option, + size_t option_count) +{ + __team_options_unregister(team, option, option_count); + __team_options_change_check(team, NULL); +} +EXPORT_SYMBOL(team_options_unregister); + +static int team_option_get(struct team *team, struct team_option *option, + void *arg) +{ + return option->getter(team, arg); +} + +static int team_option_set(struct team *team, struct team_option *option, + void *arg) +{ + int err; + + err = option->setter(team, arg); + if (err) + return err; + + __team_options_change_check(team, option); + return err; +} + +/**************** + * Mode handling + ****************/ + +static LIST_HEAD(mode_list); +static DEFINE_SPINLOCK(mode_list_lock); + +static struct team_mode *__find_mode(const char *kind) +{ + struct team_mode *mode; + + list_for_each_entry(mode, &mode_list, list) { + if (strcmp(mode->kind, kind) == 0) + return mode; + } + return NULL; +} + +static bool is_good_mode_name(const char *name) +{ + while (*name != '\0') { + if (!isalpha(*name) && !isdigit(*name) && *name != '_') + return false; + name++; + } + return true; +} + +int team_mode_register(struct team_mode *mode) +{ + int err = 0; + + if 
(!is_good_mode_name(mode->kind) || + mode->priv_size > TEAM_MODE_PRIV_SIZE) + return -EINVAL; + spin_lock(&mode_list_lock); + if (__find_mode(mode->kind)) { + err = -EEXIST; + goto unlock; + } + list_add_tail(&mode->list, &mode_list); +unlock: + spin_unlock(&mode_list_lock); + return err; +} +EXPORT_SYMBOL(team_mode_register); + +int team_mode_unregister(struct team_mode *mode) +{ + spin_lock(&mode_list_lock); + list_del_init(&mode->list); + spin_unlock(&mode_list_lock); + return 0; +} +EXPORT_SYMBOL(team_mode_unregister); + +static struct team_mode *team_mode_get(const char *kind) +{ + struct team_mode *mode; + + spin_lock(&mode_list_lock); + mode = __find_mode(kind); + if (!mode) { + spin_unlock(&mode_list_lock); + request_module("team-mode-%s", kind); + spin_lock(&mode_list_lock); + mode = __find_mode(kind); + } + if (mode) + if (!try_module_get(mode->owner)) + mode = NULL; + + spin_unlock(&mode_list_lock); + return mode; +} + +static void team_mode_put(const struct team_mode *mode) +{ + module_put(mode->owner); +} + +static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); + return false; +} + +rx_handler_result_t team_dummy_receive(struct team *team, + struct team_port *port, + struct sk_buff *skb) +{ + return RX_HANDLER_ANOTHER; +} + +static void team_adjust_ops(struct team *team) +{ + /* + * To avoid checks in rx/tx skb paths, ensure here that non-null and + * correct ops are always set. + */ + + if (list_empty(&team->port_list) || + !team->mode || !team->mode->ops->transmit) + team->ops.transmit = team_dummy_transmit; + else + team->ops.transmit = team->mode->ops->transmit; + + if (list_empty(&team->port_list) || + !team->mode || !team->mode->ops->receive) + team->ops.receive = team_dummy_receive; + else + team->ops.receive = team->mode->ops->receive; +} + +/* + * We can benefit from the fact that it's ensured no port is present + * at the time of mode change. Therefore no packets are in fly so there's no + * need to set mode operations in any special way. 
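The dummy transmit/receive handlers above exist so the hot paths never test function pointers for NULL: team_adjust_ops() always installs either the current mode's callback or a dummy. A minimal sketch of that "always-valid ops" idea, with hypothetical names, could look like this:

/* Hypothetical sketch: callers invoke ops->xmit unconditionally because
 * adjust() guarantees the pointer is never NULL, falling back to a
 * dummy that simply drops the packet.
 */
struct example_ops {
	bool (*xmit)(struct sk_buff *skb);
};

static bool example_dummy_xmit(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);		/* no mode loaded: drop */
	return false;
}

static void example_adjust_ops(struct example_ops *ops,
			       bool (*mode_xmit)(struct sk_buff *skb))
{
	ops->xmit = mode_xmit ? mode_xmit : example_dummy_xmit;
}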
+ */ +static int __team_change_mode(struct team *team, + const struct team_mode *new_mode) +{ + /* Check if mode was previously set and do cleanup if so */ + if (team->mode) { + void (*exit_op)(struct team *team) = team->ops.exit; + + /* Clear ops area so no callback is called any longer */ + memset(&team->ops, 0, sizeof(struct team_mode_ops)); + team_adjust_ops(team); + + if (exit_op) + exit_op(team); + team_mode_put(team->mode); + team->mode = NULL; + /* zero private data area */ + memset(&team->mode_priv, 0, + sizeof(struct team) - offsetof(struct team, mode_priv)); + } + + if (!new_mode) + return 0; + + if (new_mode->ops->init) { + int err; + + err = new_mode->ops->init(team); + if (err) + return err; + } + + team->mode = new_mode; + memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops)); + team_adjust_ops(team); + + return 0; +} + +static int team_change_mode(struct team *team, const char *kind) +{ + struct team_mode *new_mode; + struct net_device *dev = team->dev; + int err; + + if (!list_empty(&team->port_list)) { + netdev_err(dev, "No ports can be present during mode change\n"); + return -EBUSY; + } + + if (team->mode && strcmp(team->mode->kind, kind) == 0) { + netdev_err(dev, "Unable to change to the same mode the team is in\n"); + return -EINVAL; + } + + new_mode = team_mode_get(kind); + if (!new_mode) { + netdev_err(dev, "Mode \"%s\" not found\n", kind); + return -EINVAL; + } + + err = __team_change_mode(team, new_mode); + if (err) { + netdev_err(dev, "Failed to change to mode \"%s\"\n", kind); + team_mode_put(new_mode); + return err; + } + + netdev_info(dev, "Mode changed to \"%s\"\n", kind); + return 0; +} + + +/************************ + * Rx path frame handler + ************************/ + +/* note: already called with rcu_read_lock */ +static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct team_port *port; + struct team *team; + rx_handler_result_t res; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return RX_HANDLER_CONSUMED; + + *pskb = skb; + + port = team_port_get_rcu(skb->dev); + team = port->team; + + res = team->ops.receive(team, port, skb); + if (res == RX_HANDLER_ANOTHER) { + struct team_pcpu_stats *pcpu_stats; + + pcpu_stats = this_cpu_ptr(team->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + if (skb->pkt_type == PACKET_MULTICAST) + pcpu_stats->rx_multicast++; + u64_stats_update_end(&pcpu_stats->syncp); + + skb->dev = team->dev; + } else { + this_cpu_inc(team->pcpu_stats->rx_dropped); + } + + return res; +} + + +/**************** + * Port handling + ****************/ + +static bool team_port_find(const struct team *team, + const struct team_port *port) +{ + struct team_port *cur; + + list_for_each_entry(cur, &team->port_list, list) + if (cur == port) + return true; + return false; +} + +/* + * Add/delete port to the team port list. Write guarded by rtnl_lock. + * Takes care of correct port->index setup (might be racy). 
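team_handle_frame() above counts packets in per-CPU counters guarded by a u64_stats sequence counter rather than a lock. A rough sketch of that pattern (hypothetical structure and helper names):

/* Writers touch only their own CPU's counters inside a
 * u64_stats_update_begin()/end() section; the 64-bit reader in
 * ndo_get_stats64 sums all CPUs, retrying any CPU whose counters
 * changed while being read.
 */
struct example_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void example_count_rx(struct example_pcpu_stats __percpu *stats,
			     unsigned int len)
{
	struct example_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}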
+ */ +static void team_port_list_add_port(struct team *team, + struct team_port *port) +{ + port->index = team->port_count++; + hlist_add_head_rcu(&port->hlist, + team_port_index_hash(team, port->index)); + list_add_tail_rcu(&port->list, &team->port_list); +} + +static void __reconstruct_port_hlist(struct team *team, int rm_index) +{ + int i; + struct team_port *port; + + for (i = rm_index + 1; i < team->port_count; i++) { + port = team_get_port_by_index(team, i); + hlist_del_rcu(&port->hlist); + port->index--; + hlist_add_head_rcu(&port->hlist, + team_port_index_hash(team, port->index)); + } +} + +static void team_port_list_del_port(struct team *team, + struct team_port *port) +{ + int rm_index = port->index; + + hlist_del_rcu(&port->hlist); + list_del_rcu(&port->list); + __reconstruct_port_hlist(team, rm_index); + team->port_count--; +} + +#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ + NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_HIGHDMA | NETIF_F_LRO) + +static void __team_compute_features(struct team *team) +{ + struct team_port *port; + u32 vlan_features = TEAM_VLAN_FEATURES; + unsigned short max_hard_header_len = ETH_HLEN; + + list_for_each_entry(port, &team->port_list, list) { + vlan_features = netdev_increment_features(vlan_features, + port->dev->vlan_features, + TEAM_VLAN_FEATURES); + + if (port->dev->hard_header_len > max_hard_header_len) + max_hard_header_len = port->dev->hard_header_len; + } + + team->dev->vlan_features = vlan_features; + team->dev->hard_header_len = max_hard_header_len; + + netdev_change_features(team->dev); +} + +static void team_compute_features(struct team *team) +{ + mutex_lock(&team->lock); + __team_compute_features(team); + mutex_unlock(&team->lock); +} + +static int team_port_enter(struct team *team, struct team_port *port) +{ + int err = 0; + + dev_hold(team->dev); + port->dev->priv_flags |= IFF_TEAM_PORT; + if (team->ops.port_enter) { + err = team->ops.port_enter(team, port); + if (err) { + netdev_err(team->dev, "Device %s failed to enter team mode\n", + port->dev->name); + goto err_port_enter; + } + } + + return 0; + +err_port_enter: + port->dev->priv_flags &= ~IFF_TEAM_PORT; + dev_put(team->dev); + + return err; +} + +static void team_port_leave(struct team *team, struct team_port *port) +{ + if (team->ops.port_leave) + team->ops.port_leave(team, port); + port->dev->priv_flags &= ~IFF_TEAM_PORT; + dev_put(team->dev); +} + +static void __team_port_change_check(struct team_port *port, bool linkup); + +static int team_port_add(struct team *team, struct net_device *port_dev) +{ + struct net_device *dev = team->dev; + struct team_port *port; + char *portname = port_dev->name; + int err; + + if (port_dev->flags & IFF_LOOPBACK || + port_dev->type != ARPHRD_ETHER) { + netdev_err(dev, "Device %s is of an unsupported type\n", + portname); + return -EINVAL; + } + + if (team_port_exists(port_dev)) { + netdev_err(dev, "Device %s is already a port " + "of a team device\n", portname); + return -EBUSY; + } + + if (port_dev->flags & IFF_UP) { + netdev_err(dev, "Device %s is up. 
Set it down before adding it as a team port\n", + portname); + return -EBUSY; + } + + port = kzalloc(sizeof(struct team_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->dev = port_dev; + port->team = team; + + port->orig.mtu = port_dev->mtu; + err = dev_set_mtu(port_dev, dev->mtu); + if (err) { + netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err); + goto err_set_mtu; + } + + memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); + + err = team_port_enter(team, port); + if (err) { + netdev_err(dev, "Device %s failed to enter team mode\n", + portname); + goto err_port_enter; + } + + err = dev_open(port_dev); + if (err) { + netdev_dbg(dev, "Device %s opening failed\n", + portname); + goto err_dev_open; + } + + err = vlan_vids_add_by_dev(port_dev, dev); + if (err) { + netdev_err(dev, "Failed to add vlan ids to device %s\n", + portname); + goto err_vids_add; + } + + err = netdev_set_master(port_dev, dev); + if (err) { + netdev_err(dev, "Device %s failed to set master\n", portname); + goto err_set_master; + } + + err = netdev_rx_handler_register(port_dev, team_handle_frame, + port); + if (err) { + netdev_err(dev, "Device %s failed to register rx_handler\n", + portname); + goto err_handler_register; + } + + team_port_list_add_port(team, port); + team_adjust_ops(team); + __team_compute_features(team); + __team_port_change_check(port, !!netif_carrier_ok(port_dev)); + + netdev_info(dev, "Port device %s added\n", portname); + + return 0; + +err_handler_register: + netdev_set_master(port_dev, NULL); + +err_set_master: + vlan_vids_del_by_dev(port_dev, dev); + +err_vids_add: + dev_close(port_dev); + +err_dev_open: + team_port_leave(team, port); + team_port_set_orig_mac(port); + +err_port_enter: + dev_set_mtu(port_dev, port->orig.mtu); + +err_set_mtu: + kfree(port); + + return err; +} + +static int team_port_del(struct team *team, struct net_device *port_dev) +{ + struct net_device *dev = team->dev; + struct team_port *port; + char *portname = port_dev->name; + + port = team_port_get_rtnl(port_dev); + if (!port || !team_port_find(team, port)) { + netdev_err(dev, "Device %s does not act as a port of this team\n", + portname); + return -ENOENT; + } + + __team_port_change_check(port, false); + team_port_list_del_port(team, port); + team_adjust_ops(team); + netdev_rx_handler_unregister(port_dev); + netdev_set_master(port_dev, NULL); + vlan_vids_del_by_dev(port_dev, dev); + dev_close(port_dev); + team_port_leave(team, port); + team_port_set_orig_mac(port); + dev_set_mtu(port_dev, port->orig.mtu); + synchronize_rcu(); + kfree(port); + netdev_info(dev, "Port device %s removed\n", portname); + __team_compute_features(team); + + return 0; +} + + +/***************** + * Net device ops + *****************/ + +static const char team_no_mode_kind[] = "*NOMODE*"; + +static int team_mode_option_get(struct team *team, void *arg) +{ + const char **str = arg; + + *str = team->mode ? 
team->mode->kind : team_no_mode_kind; + return 0; +} + +static int team_mode_option_set(struct team *team, void *arg) +{ + const char **str = arg; + + return team_change_mode(team, *str); +} + +static const struct team_option team_options[] = { + { + .name = "mode", + .type = TEAM_OPTION_TYPE_STRING, + .getter = team_mode_option_get, + .setter = team_mode_option_set, + }, +}; + +static int team_init(struct net_device *dev) +{ + struct team *team = netdev_priv(dev); + int i; + int err; + + team->dev = dev; + mutex_init(&team->lock); + + team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); + if (!team->pcpu_stats) + return -ENOMEM; + + for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) + INIT_HLIST_HEAD(&team->port_hlist[i]); + INIT_LIST_HEAD(&team->port_list); + + team_adjust_ops(team); + + INIT_LIST_HEAD(&team->option_list); + err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); + if (err) + goto err_options_register; + netif_carrier_off(dev); + + return 0; + +err_options_register: + free_percpu(team->pcpu_stats); + + return err; +} + +static void team_uninit(struct net_device *dev) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + struct team_port *tmp; + + mutex_lock(&team->lock); + list_for_each_entry_safe(port, tmp, &team->port_list, list) + team_port_del(team, port->dev); + + __team_change_mode(team, NULL); /* cleanup */ + __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + mutex_unlock(&team->lock); +} + +static void team_destructor(struct net_device *dev) +{ + struct team *team = netdev_priv(dev); + + free_percpu(team->pcpu_stats); + free_netdev(dev); +} + +static int team_open(struct net_device *dev) +{ + netif_carrier_on(dev); + return 0; +} + +static int team_close(struct net_device *dev) +{ + netif_carrier_off(dev); + return 0; +} + +/* + * note: already called with rcu_read_lock + */ +static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct team *team = netdev_priv(dev); + bool tx_success = false; + unsigned int len = skb->len; + + tx_success = team->ops.transmit(team, skb); + if (tx_success) { + struct team_pcpu_stats *pcpu_stats; + + pcpu_stats = this_cpu_ptr(team->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(team->pcpu_stats->tx_dropped); + } + + return NETDEV_TX_OK; +} + +static void team_change_rx_flags(struct net_device *dev, int change) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + int inc; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) { + if (change & IFF_PROMISC) { + inc = dev->flags & IFF_PROMISC ? 1 : -1; + dev_set_promiscuity(port->dev, inc); + } + if (change & IFF_ALLMULTI) { + inc = dev->flags & IFF_ALLMULTI ? 
1 : -1; + dev_set_allmulti(port->dev, inc); + } + } + rcu_read_unlock(); +} + +static void team_set_rx_mode(struct net_device *dev) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) { + dev_uc_sync(port->dev, dev); + dev_mc_sync(port->dev, dev); + } + rcu_read_unlock(); +} + +static int team_set_mac_address(struct net_device *dev, void *p) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + struct sockaddr *addr = p; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) + if (team->ops.port_change_mac) + team->ops.port_change_mac(team, port); + rcu_read_unlock(); + return 0; +} + +static int team_change_mtu(struct net_device *dev, int new_mtu) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + int err; + + /* + * Alhough this is reader, it's guarded by team lock. It's not possible + * to traverse list in reverse under rcu_read_lock + */ + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) { + err = dev_set_mtu(port->dev, new_mtu); + if (err) { + netdev_err(dev, "Device %s failed to change mtu", + port->dev->name); + goto unwind; + } + } + mutex_unlock(&team->lock); + + dev->mtu = new_mtu; + + return 0; + +unwind: + list_for_each_entry_continue_reverse(port, &team->port_list, list) + dev_set_mtu(port->dev, dev->mtu); + mutex_unlock(&team->lock); + + return err; +} + +static struct rtnl_link_stats64 * +team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct team *team = netdev_priv(dev); + struct team_pcpu_stats *p; + u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; + u32 rx_dropped = 0, tx_dropped = 0; + unsigned int start; + int i; + + for_each_possible_cpu(i) { + p = per_cpu_ptr(team->pcpu_stats, i); + do { + start = u64_stats_fetch_begin_bh(&p->syncp); + rx_packets = p->rx_packets; + rx_bytes = p->rx_bytes; + rx_multicast = p->rx_multicast; + tx_packets = p->tx_packets; + tx_bytes = p->tx_bytes; + } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->multicast += rx_multicast; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + /* + * rx_dropped & tx_dropped are u32, updated + * without syncp protection. + */ + rx_dropped += p->rx_dropped; + tx_dropped += p->tx_dropped; + } + stats->rx_dropped = rx_dropped; + stats->tx_dropped = tx_dropped; + return stats; +} + +static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + int err; + + /* + * Alhough this is reader, it's guarded by team lock. 
It's not possible + * to traverse list in reverse under rcu_read_lock + */ + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) { + err = vlan_vid_add(port->dev, vid); + if (err) + goto unwind; + } + mutex_unlock(&team->lock); + + return 0; + +unwind: + list_for_each_entry_continue_reverse(port, &team->port_list, list) + vlan_vid_del(port->dev, vid); + mutex_unlock(&team->lock); + + return err; +} + +static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) +{ + struct team *team = netdev_priv(dev); + struct team_port *port; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) + vlan_vid_del(port->dev, vid); + rcu_read_unlock(); + + return 0; +} + +static int team_add_slave(struct net_device *dev, struct net_device *port_dev) +{ + struct team *team = netdev_priv(dev); + int err; + + mutex_lock(&team->lock); + err = team_port_add(team, port_dev); + mutex_unlock(&team->lock); + return err; +} + +static int team_del_slave(struct net_device *dev, struct net_device *port_dev) +{ + struct team *team = netdev_priv(dev); + int err; + + mutex_lock(&team->lock); + err = team_port_del(team, port_dev); + mutex_unlock(&team->lock); + return err; +} + +static netdev_features_t team_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct team_port *port; + struct team *team = netdev_priv(dev); + netdev_features_t mask; + + mask = features; + features &= ~NETIF_F_ONE_FOR_ALL; + features |= NETIF_F_ALL_FOR_ALL; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) { + features = netdev_increment_features(features, + port->dev->features, + mask); + } + rcu_read_unlock(); + return features; +} + +static const struct net_device_ops team_netdev_ops = { + .ndo_init = team_init, + .ndo_uninit = team_uninit, + .ndo_open = team_open, + .ndo_stop = team_close, + .ndo_start_xmit = team_xmit, + .ndo_change_rx_flags = team_change_rx_flags, + .ndo_set_rx_mode = team_set_rx_mode, + .ndo_set_mac_address = team_set_mac_address, + .ndo_change_mtu = team_change_mtu, + .ndo_get_stats64 = team_get_stats64, + .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid, + .ndo_add_slave = team_add_slave, + .ndo_del_slave = team_del_slave, + .ndo_fix_features = team_fix_features, +}; + + +/*********************** + * rt netlink interface + ***********************/ + +static void team_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->netdev_ops = &team_netdev_ops; + dev->destructor = team_destructor; + dev->tx_queue_len = 0; + dev->flags |= IFF_MULTICAST; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + + /* + * Indicate we support unicast address filtering. That way core won't + * bring us to promisc mode in case a unicast addr is added. + * Let this up to underlay drivers. 
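team_change_mtu() and team_vlan_rx_add_vid() above share the same unwind idiom: walk the port list applying a change and, on the first failure, walk back over only the entries already modified. A generic sketch of the idiom (hypothetical item type and callbacks):

/* On error, list_for_each_entry_continue_reverse() starts from the
 * failing entry and revisits exactly the entries that were already
 * changed, in reverse order.
 */
struct example_item {
	struct list_head list;
};

static int example_apply_all(struct list_head *head,
			     int (*set)(struct example_item *item),
			     void (*undo)(struct example_item *item))
{
	struct example_item *item;
	int err;

	list_for_each_entry(item, head, list) {
		err = set(item);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(item, head, list)
		undo(item);
	return err;
}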
+ */ + dev->priv_flags |= IFF_UNICAST_FLT; + + dev->features |= NETIF_F_LLTX; + dev->features |= NETIF_F_GRO; + dev->hw_features = NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + dev->features |= dev->hw_features; +} + +static int team_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + int err; + + if (tb[IFLA_ADDRESS] == NULL) + random_ether_addr(dev->dev_addr); + + err = register_netdevice(dev); + if (err) + return err; + + return 0; +} + +static int team_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static struct rtnl_link_ops team_link_ops __read_mostly = { + .kind = DRV_NAME, + .priv_size = sizeof(struct team), + .setup = team_setup, + .newlink = team_newlink, + .validate = team_validate, +}; + + +/*********************************** + * Generic netlink custom interface + ***********************************/ + +static struct genl_family team_nl_family = { + .id = GENL_ID_GENERATE, + .name = TEAM_GENL_NAME, + .version = TEAM_GENL_VERSION, + .maxattr = TEAM_ATTR_MAX, + .netnsok = true, +}; + +static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = { + [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, }, + [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 }, + [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED }, + [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy +team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { + [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, }, + [TEAM_ATTR_OPTION_NAME] = { + .type = NLA_STRING, + .len = TEAM_STRING_MAX_LEN, + }, + [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, + [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, + [TEAM_ATTR_OPTION_DATA] = { + .type = NLA_BINARY, + .len = TEAM_STRING_MAX_LEN, + }, +}; + +static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + void *hdr; + int err; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + &team_nl_family, 0, TEAM_CMD_NOOP); + if (IS_ERR(hdr)) { + err = PTR_ERR(hdr); + goto err_msg_put; + } + + genlmsg_end(msg, hdr); + + return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); + +err_msg_put: + nlmsg_free(msg); + + return err; +} + +/* + * Netlink cmd functions should be locked by following two functions. + * Since dev gets held here, that ensures dev won't disappear in between. 
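Every generic netlink doit handler that follows brackets its work with the team_nl_team_get()/team_nl_team_put() pair defined just below, so the device reference and team->lock are held for the whole operation. A usage sketch with a hypothetical handler:

static int team_nl_cmd_example(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;

	team = team_nl_team_get(info);	/* dev_get_by_index() + mutex_lock() */
	if (!team)
		return -EINVAL;

	/* ... read or modify team state here, protected by team->lock ... */

	team_nl_team_put(team);		/* mutex_unlock() + dev_put() */
	return 0;
}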
+ */ +static struct team *team_nl_team_get(struct genl_info *info) +{ + struct net *net = genl_info_net(info); + int ifindex; + struct net_device *dev; + struct team *team; + + if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX]) + return NULL; + + ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]); + dev = dev_get_by_index(net, ifindex); + if (!dev || dev->netdev_ops != &team_netdev_ops) { + if (dev) + dev_put(dev); + return NULL; + } + + team = netdev_priv(dev); + mutex_lock(&team->lock); + return team; +} + +static void team_nl_team_put(struct team *team) +{ + mutex_unlock(&team->lock); + dev_put(team->dev); +} + +static int team_nl_send_generic(struct genl_info *info, struct team *team, + int (*fill_func)(struct sk_buff *skb, + struct genl_info *info, + int flags, struct team *team)) +{ + struct sk_buff *skb; + int err; + + skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + err = fill_func(skb, info, NLM_F_ACK, team); + if (err < 0) + goto err_fill; + + err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); + return err; + +err_fill: + nlmsg_free(skb); + return err; +} + +static int team_nl_fill_options_get_changed(struct sk_buff *skb, + u32 pid, u32 seq, int flags, + struct team *team, + struct team_option *changed_option) +{ + struct nlattr *option_list; + void *hdr; + struct team_option *option; + + hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, + TEAM_CMD_OPTIONS_GET); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); + option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); + if (!option_list) + return -EMSGSIZE; + + list_for_each_entry(option, &team->option_list, list) { + struct nlattr *option_item; + long arg; + + option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); + if (!option_item) + goto nla_put_failure; + NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); + if (option == changed_option) + NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); + switch (option->type) { + case TEAM_OPTION_TYPE_U32: + NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); + team_option_get(team, option, &arg); + NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); + break; + case TEAM_OPTION_TYPE_STRING: + NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); + team_option_get(team, option, &arg); + NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, + (char *) arg); + break; + default: + BUG(); + } + nla_nest_end(skb, option_item); + } + + nla_nest_end(skb, option_list); + return genlmsg_end(skb, hdr); + +nla_put_failure: + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; +} + +static int team_nl_fill_options_get(struct sk_buff *skb, + struct genl_info *info, int flags, + struct team *team) +{ + return team_nl_fill_options_get_changed(skb, info->snd_pid, + info->snd_seq, NLM_F_ACK, + team, NULL); +} + +static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) +{ + struct team *team; + int err; + + team = team_nl_team_get(info); + if (!team) + return -EINVAL; + + err = team_nl_send_generic(info, team, team_nl_fill_options_get); + + team_nl_team_put(team); + + return err; +} + +static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) +{ + struct team *team; + int err = 0; + int i; + struct nlattr *nl_option; + + team = team_nl_team_get(info); + if (!team) + return -EINVAL; + + err = -EINVAL; + if (!info->attrs[TEAM_ATTR_LIST_OPTION]) { + err = -EINVAL; + goto team_put; + } + + nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { + struct nlattr 
*mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; + enum team_option_type opt_type; + struct team_option *option; + char *opt_name; + bool opt_found = false; + + if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) { + err = -EINVAL; + goto team_put; + } + err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, + nl_option, team_nl_option_policy); + if (err) + goto team_put; + if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || + !mode_attrs[TEAM_ATTR_OPTION_TYPE] || + !mode_attrs[TEAM_ATTR_OPTION_DATA]) { + err = -EINVAL; + goto team_put; + } + switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { + case NLA_U32: + opt_type = TEAM_OPTION_TYPE_U32; + break; + case NLA_STRING: + opt_type = TEAM_OPTION_TYPE_STRING; + break; + default: + goto team_put; + } + + opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); + list_for_each_entry(option, &team->option_list, list) { + long arg; + struct nlattr *opt_data_attr; + + if (option->type != opt_type || + strcmp(option->name, opt_name)) + continue; + opt_found = true; + opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; + switch (opt_type) { + case TEAM_OPTION_TYPE_U32: + arg = nla_get_u32(opt_data_attr); + break; + case TEAM_OPTION_TYPE_STRING: + arg = (long) nla_data(opt_data_attr); + break; + default: + BUG(); + } + err = team_option_set(team, option, &arg); + if (err) + goto team_put; + } + if (!opt_found) { + err = -ENOENT; + goto team_put; + } + } + +team_put: + team_nl_team_put(team); + + return err; +} + +static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, + u32 pid, u32 seq, int flags, + struct team *team, + struct team_port *changed_port) +{ + struct nlattr *port_list; + void *hdr; + struct team_port *port; + + hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, + TEAM_CMD_PORT_LIST_GET); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); + port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); + if (!port_list) + return -EMSGSIZE; + + list_for_each_entry(port, &team->port_list, list) { + struct nlattr *port_item; + + port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); + if (!port_item) + goto nla_put_failure; + NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); + if (port == changed_port) + NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); + if (port->linkup) + NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); + NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); + NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); + nla_nest_end(skb, port_item); + } + + nla_nest_end(skb, port_list); + return genlmsg_end(skb, hdr); + +nla_put_failure: + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; +} + +static int team_nl_fill_port_list_get(struct sk_buff *skb, + struct genl_info *info, int flags, + struct team *team) +{ + return team_nl_fill_port_list_get_changed(skb, info->snd_pid, + info->snd_seq, NLM_F_ACK, + team, NULL); +} + +static int team_nl_cmd_port_list_get(struct sk_buff *skb, + struct genl_info *info) +{ + struct team *team; + int err; + + team = team_nl_team_get(info); + if (!team) + return -EINVAL; + + err = team_nl_send_generic(info, team, team_nl_fill_port_list_get); + + team_nl_team_put(team); + + return err; +} + +static struct genl_ops team_nl_ops[] = { + { + .cmd = TEAM_CMD_NOOP, + .doit = team_nl_cmd_noop, + .policy = team_nl_policy, + }, + { + .cmd = TEAM_CMD_OPTIONS_SET, + .doit = team_nl_cmd_options_set, + .policy = team_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = TEAM_CMD_OPTIONS_GET, + .doit = team_nl_cmd_options_get, + .policy = team_nl_policy, + .flags 
= GENL_ADMIN_PERM, + }, + { + .cmd = TEAM_CMD_PORT_LIST_GET, + .doit = team_nl_cmd_port_list_get, + .policy = team_nl_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static struct genl_multicast_group team_change_event_mcgrp = { + .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, +}; + +static int team_nl_send_event_options_get(struct team *team, + struct team_option *changed_option) +{ + struct sk_buff *skb; + int err; + struct net *net = dev_net(team->dev); + + skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team, + changed_option); + if (err < 0) + goto err_fill; + + err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, + GFP_KERNEL); + return err; + +err_fill: + nlmsg_free(skb); + return err; +} + +static int team_nl_send_event_port_list_get(struct team_port *port) +{ + struct sk_buff *skb; + int err; + struct net *net = dev_net(port->team->dev); + + skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0, + port->team, port); + if (err < 0) + goto err_fill; + + err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, + GFP_KERNEL); + return err; + +err_fill: + nlmsg_free(skb); + return err; +} + +static int team_nl_init(void) +{ + int err; + + err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, + ARRAY_SIZE(team_nl_ops)); + if (err) + return err; + + err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); + if (err) + goto err_change_event_grp_reg; + + return 0; + +err_change_event_grp_reg: + genl_unregister_family(&team_nl_family); + + return err; +} + +static void team_nl_fini(void) +{ + genl_unregister_family(&team_nl_family); +} + + +/****************** + * Change checkers + ******************/ + +static void __team_options_change_check(struct team *team, + struct team_option *changed_option) +{ + int err; + + err = team_nl_send_event_options_get(team, changed_option); + if (err) + netdev_warn(team->dev, "Failed to send options change via netlink\n"); +} + +/* rtnl lock is held */ +static void __team_port_change_check(struct team_port *port, bool linkup) +{ + int err; + + if (port->linkup == linkup) + return; + + port->linkup = linkup; + if (linkup) { + struct ethtool_cmd ecmd; + + err = __ethtool_get_settings(port->dev, &ecmd); + if (!err) { + port->speed = ethtool_cmd_speed(&ecmd); + port->duplex = ecmd.duplex; + goto send_event; + } + } + port->speed = 0; + port->duplex = 0; + +send_event: + err = team_nl_send_event_port_list_get(port); + if (err) + netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", + port->dev->name); + +} + +static void team_port_change_check(struct team_port *port, bool linkup) +{ + struct team *team = port->team; + + mutex_lock(&team->lock); + __team_port_change_check(port, linkup); + mutex_unlock(&team->lock); +} + +/************************************ + * Net device notifier event handler + ************************************/ + +static int team_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = (struct net_device *) ptr; + struct team_port *port; + + port = team_port_get_rtnl(dev); + if (!port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + if (netif_carrier_ok(dev)) + team_port_change_check(port, true); + case NETDEV_DOWN: + team_port_change_check(port, false); + case NETDEV_CHANGE: + if (netif_running(port->dev)) + 
team_port_change_check(port, + !!netif_carrier_ok(port->dev)); + break; + case NETDEV_UNREGISTER: + team_del_slave(port->team->dev, dev); + break; + case NETDEV_FEAT_CHANGE: + team_compute_features(port->team); + break; + case NETDEV_CHANGEMTU: + /* Forbid to change mtu of underlaying device */ + return NOTIFY_BAD; + case NETDEV_PRE_TYPE_CHANGE: + /* Forbid to change type of underlaying device */ + return NOTIFY_BAD; + } + return NOTIFY_DONE; +} + +static struct notifier_block team_notifier_block __read_mostly = { + .notifier_call = team_device_event, +}; + + +/*********************** + * Module init and exit + ***********************/ + +static int __init team_module_init(void) +{ + int err; + + register_netdevice_notifier(&team_notifier_block); + + err = rtnl_link_register(&team_link_ops); + if (err) + goto err_rtnl_reg; + + err = team_nl_init(); + if (err) + goto err_nl_init; + + return 0; + +err_nl_init: + rtnl_link_unregister(&team_link_ops); + +err_rtnl_reg: + unregister_netdevice_notifier(&team_notifier_block); + + return err; +} + +static void __exit team_module_exit(void) +{ + team_nl_fini(); + rtnl_link_unregister(&team_link_ops); + unregister_netdevice_notifier(&team_notifier_block); +} + +module_init(team_module_init); +module_exit(team_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); +MODULE_DESCRIPTION("Ethernet team device driver"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c new file mode 100644 index 00000000000..f4d960e82e2 --- /dev/null +++ b/drivers/net/team/team_mode_activebackup.c @@ -0,0 +1,136 @@ +/* + * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team + * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
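team_device_event() above follows the usual netdevice notifier pattern: one notifier_block registered at module init, called once per event with the affected net_device as the argument (the 3.x-era signature, where ptr is the device itself). A minimal sketch with hypothetical names:

static int example_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_CHANGEMTU:
		netdev_dbg(dev, "MTU is now %d\n", dev->mtu);
		break;
	}
	return NOTIFY_DONE;	/* let other subscribers see the event too */
}

static struct notifier_block example_notifier __read_mostly = {
	.notifier_call = example_device_event,
};

/* register_netdevice_notifier(&example_notifier) at module init,
 * unregister_netdevice_notifier(&example_notifier) at module exit. */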
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <net/rtnetlink.h> +#include <linux/if_team.h> + +struct ab_priv { + struct team_port __rcu *active_port; +}; + +static struct ab_priv *ab_priv(struct team *team) +{ + return (struct ab_priv *) &team->mode_priv; +} + +static rx_handler_result_t ab_receive(struct team *team, struct team_port *port, + struct sk_buff *skb) { + struct team_port *active_port; + + active_port = rcu_dereference(ab_priv(team)->active_port); + if (active_port != port) + return RX_HANDLER_EXACT; + return RX_HANDLER_ANOTHER; +} + +static bool ab_transmit(struct team *team, struct sk_buff *skb) +{ + struct team_port *active_port; + + active_port = rcu_dereference(ab_priv(team)->active_port); + if (unlikely(!active_port)) + goto drop; + skb->dev = active_port->dev; + if (dev_queue_xmit(skb)) + return false; + return true; + +drop: + dev_kfree_skb_any(skb); + return false; +} + +static void ab_port_leave(struct team *team, struct team_port *port) +{ + if (ab_priv(team)->active_port == port) + RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); +} + +static int ab_active_port_get(struct team *team, void *arg) +{ + u32 *ifindex = arg; + + *ifindex = 0; + if (ab_priv(team)->active_port) + *ifindex = ab_priv(team)->active_port->dev->ifindex; + return 0; +} + +static int ab_active_port_set(struct team *team, void *arg) +{ + u32 *ifindex = arg; + struct team_port *port; + + list_for_each_entry_rcu(port, &team->port_list, list) { + if (port->dev->ifindex == *ifindex) { + rcu_assign_pointer(ab_priv(team)->active_port, port); + return 0; + } + } + return -ENOENT; +} + +static const struct team_option ab_options[] = { + { + .name = "activeport", + .type = TEAM_OPTION_TYPE_U32, + .getter = ab_active_port_get, + .setter = ab_active_port_set, + }, +}; + +int ab_init(struct team *team) +{ + return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); +} + +void ab_exit(struct team *team) +{ + team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); +} + +static const struct team_mode_ops ab_mode_ops = { + .init = ab_init, + .exit = ab_exit, + .receive = ab_receive, + .transmit = ab_transmit, + .port_leave = ab_port_leave, +}; + +static struct team_mode ab_mode = { + .kind = "activebackup", + .owner = THIS_MODULE, + .priv_size = sizeof(struct ab_priv), + .ops = &ab_mode_ops, +}; + +static int __init ab_init_module(void) +{ + return team_mode_register(&ab_mode); +} + +static void __exit ab_cleanup_module(void) +{ + team_mode_unregister(&ab_mode); +} + +module_init(ab_init_module); +module_exit(ab_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); +MODULE_DESCRIPTION("Active-backup mode for team"); +MODULE_ALIAS("team-mode-activebackup"); diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c new file mode 100644 index 00000000000..a0e8f806331 --- /dev/null +++ b/drivers/net/team/team_mode_roundrobin.c @@ -0,0 +1,107 @@ +/* + * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team + * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
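The active-backup mode above keeps its single piece of state, the active port, as an __rcu pointer: the option setter publishes it with rcu_assign_pointer() while the rx/tx fast paths read it with rcu_dereference() and never take a lock. A stripped-down sketch of that publish/read pairing (hypothetical names):

struct example_ab_state {
	struct team_port __rcu *active;
};

/* Slow path, runs under the team lock: publish the new active port. */
static void example_set_active(struct example_ab_state *st,
			       struct team_port *port)
{
	rcu_assign_pointer(st->active, port);
}

/* Fast path, runs inside rcu_read_lock(): read the current active port. */
static struct team_port *example_get_active(struct example_ab_state *st)
{
	return rcu_dereference(st->active);
}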
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/if_team.h> + +struct rr_priv { + unsigned int sent_packets; +}; + +static struct rr_priv *rr_priv(struct team *team) +{ + return (struct rr_priv *) &team->mode_priv; +} + +static struct team_port *__get_first_port_up(struct team *team, + struct team_port *port) +{ + struct team_port *cur; + + if (port->linkup) + return port; + cur = port; + list_for_each_entry_continue_rcu(cur, &team->port_list, list) + if (cur->linkup) + return cur; + list_for_each_entry_rcu(cur, &team->port_list, list) { + if (cur == port) + break; + if (cur->linkup) + return cur; + } + return NULL; +} + +static bool rr_transmit(struct team *team, struct sk_buff *skb) +{ + struct team_port *port; + int port_index; + + port_index = rr_priv(team)->sent_packets++ % team->port_count; + port = team_get_port_by_index_rcu(team, port_index); + port = __get_first_port_up(team, port); + if (unlikely(!port)) + goto drop; + skb->dev = port->dev; + if (dev_queue_xmit(skb)) + return false; + return true; + +drop: + dev_kfree_skb_any(skb); + return false; +} + +static int rr_port_enter(struct team *team, struct team_port *port) +{ + return team_port_set_team_mac(port); +} + +static void rr_port_change_mac(struct team *team, struct team_port *port) +{ + team_port_set_team_mac(port); +} + +static const struct team_mode_ops rr_mode_ops = { + .transmit = rr_transmit, + .port_enter = rr_port_enter, + .port_change_mac = rr_port_change_mac, +}; + +static struct team_mode rr_mode = { + .kind = "roundrobin", + .owner = THIS_MODULE, + .priv_size = sizeof(struct rr_priv), + .ops = &rr_mode_ops, +}; + +static int __init rr_init_module(void) +{ + return team_mode_register(&rr_mode); +} + +static void __exit rr_cleanup_module(void) +{ + team_mode_unregister(&rr_mode); +} + +module_init(rr_init_module); +module_exit(rr_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); +MODULE_DESCRIPTION("Round-robin mode for team"); +MODULE_ALIAS("team-mode-roundrobin"); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7bea9c65119..93c5d72711b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -123,7 +123,7 @@ struct tun_struct { gid_t group; struct net_device *dev; - u32 set_features; + netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6|NETIF_F_UFO) struct fasync_struct *fasync; @@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static u32 tun_net_fix_features(struct net_device *dev, u32 features) +static netdev_features_t tun_net_fix_features(struct net_device *dev, + netdev_features_t features) { struct tun_struct *tun = netdev_priv(dev); @@ -1196,7 +1197,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun, * privs required. 
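The round-robin mode above picks a port by taking a free-running packet counter modulo the current port count, then __get_first_port_up() scans forward (wrapping) for the first port whose link is up. The core of that selection, sketched with a hypothetical helper:

/* Returns the index of the port that should carry the next packet.
 * 'sent' is a free-running counter owned by the mode; wrap-around is
 * harmless because only the modulo matters.
 */
static unsigned int example_rr_next_index(unsigned int *sent,
					  unsigned int port_count)
{
	return (*sent)++ % port_count;	/* caller guarantees port_count > 0 */
}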
*/ static int set_offload(struct tun_struct *tun, unsigned long arg) { - u32 features = 0; + netdev_features_t features = 0; if (arg & TUN_F_CSUM) { features |= NETIF_F_HW_CSUM; @@ -1589,16 +1590,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct tun_struct *tun = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: - strcpy(info->bus_info, "tun"); + strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); break; case TUN_TAP_DEV: - strcpy(info->bus_info, "tap"); + strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); break; } } diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index e95f0e60a9b..dbdca225b84 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -36,7 +36,7 @@ #include <linux/usb/usbnet.h> #include <linux/slab.h> -#define DRIVER_VERSION "08-Nov-2011" +#define DRIVER_VERSION "22-Dec-2011" #define DRIVER_NAME "asix" /* ASIX AX8817X based USB 2.0 Ethernet Devices */ @@ -689,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) } wolinfo->supported = WAKE_PHY | WAKE_MAGIC; wolinfo->wolopts = 0; + if (opt & AX_MONITOR_LINK) + wolinfo->wolopts |= WAKE_PHY; + if (opt & AX_MONITOR_MAGIC) + wolinfo->wolopts |= WAKE_MAGIC; } static int diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index a60d0069cc4..331e44056f5 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) struct page *page; int err; - page = __netdev_alloc_page(dev, gfp_flags); + page = alloc_page(gfp_flags); if (!page) return -ENOMEM; @@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) err = usb_submit_urb(req, gfp_flags); if (unlikely(err)) { dev_dbg(&dev->dev, "RX submit error (%d)\n", err); - netdev_free_page(dev, page); + put_page(page); } return err; } @@ -208,9 +208,9 @@ static void rx_complete(struct urb *req) dev->stats.rx_errors++; resubmit: if (page) - netdev_free_page(dev, page); + put_page(page); if (req) - rx_submit(pnd, req, GFP_ATOMIC); + rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD); } static int usbpn_close(struct net_device *dev); @@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev) for (i = 0; i < rxq_size; i++) { struct urb *req = usb_alloc_urb(0, GFP_KERNEL); - if (!req || rx_submit(pnd, req, GFP_KERNEL)) { + if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { usbpn_close(dev); return -ENOMEM; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f06fb78383a..009dd0f1853 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) int temp; u8 iface_no; - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) return -ENODEV; - memset(ctx, 0, sizeof(*ctx)); - init_timer(&ctx->tx_timer); spin_lock_init(&ctx->mtx); ctx->netdev = dev->net; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 769f5090bda..5d99b8cacd7 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus"; #define BMSR_MEDIA 
(BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ BMSR_100FULL | BMSR_ANEGCAPABLE) -static int loopback; -static int mii_mode; +static bool loopback; +static bool mii_mode; static char *devid; static struct usb_eth_dev usb_dev_id[] = { @@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus) for (i = 0; i < REG_TIMEOUT; i++) { get_registers(pegasus, EthCtrl1, 1, &data); if (~data & 0x08) { - if (loopback & 1) + if (loopback) break; if (mii_mode && (pegasus->features & HAS_HOME_PNA)) set_register(pegasus, Gpio1, 0x34); @@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) data[1] |= 0x10; /* set 100 Mbps */ if (mii_mode) data[1] = 0; - data[2] = (loopback & 1) ? 0x09 : 0x01; + data[2] = loopback ? 0x09 : 0x01; memcpy(pegasus->eth_regs, data, sizeof(data)); ret = set_registers(pegasus, EthCtrl0, 3, data); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index a5b9b12ef26..0d5da82f0ff 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -76,7 +76,7 @@ struct usb_context { struct usbnet *dev; }; -static int turbo_mode = true; +static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); @@ -728,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) } /* Enable or disable Rx checksum offload engine */ -static int smsc75xx_set_features(struct net_device *netdev, u32 features) +static int smsc75xx_set_features(struct net_device *netdev, + netdev_features_t features) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index eff67678c5a..db217ad66f2 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -59,7 +59,7 @@ struct usb_context { struct usbnet *dev; }; -static int turbo_mode = true; +static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); @@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) } /* Enable or disable Tx & Rx checksum offload engines */ -static int smsc95xx_set_features(struct net_device *netdev, u32 features) +static int smsc95xx_set_features(struct net_device *netdev, + netdev_features_t features) { struct usbnet *dev = netdev_priv(netdev); u32 read_buf; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ef883e97cee..49f4667e1fa 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -27,8 +27,8 @@ struct veth_net_stats { u64 rx_packets; - u64 tx_packets; u64 rx_bytes; + u64 tx_packets; u64 tx_bytes; u64 rx_dropped; struct u64_stats_sync syncp; @@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev) dev->features |= NETIF_F_LLTX; dev->destructor = veth_dev_free; - dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; } /* diff 
--git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6ee8410443c..76fe14efb2b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -30,7 +30,7 @@ static int napi_weight = 128; module_param(napi_weight, int, 0444); -static int csum = 1, gso = 1; +static bool csum = true, gso = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); @@ -39,6 +39,7 @@ module_param(gso, bool, 0444); #define GOOD_COPY_LEN 128 #define VIRTNET_SEND_COMMAND_SG_MAX 2 +#define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync syncp; @@ -155,6 +156,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page, *len -= size; } +/* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct page *page, unsigned int len) { @@ -357,7 +359,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) struct skb_vnet_hdr *hdr; int err; - skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); + skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); if (unlikely(!skb)) return -ENOMEM; @@ -439,7 +441,13 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) return err; } -/* Returns false if we couldn't fill entirely (OOM). */ +/* + * Returns false if we couldn't fill entirely (OOM). + * + * Normally run in the receive path, but can also be run from ndo_open + * before we're receiving packets, or from refill_work which is + * careful to disable receiving (using napi_disable). + */ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) { int err; @@ -501,7 +509,7 @@ static void refill_work(struct work_struct *work) /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) - schedule_delayed_work(&vi->refill, HZ/2); + queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2); } static int virtnet_poll(struct napi_struct *napi, int budget) @@ -520,7 +528,7 @@ again: if (vi->num < vi->max / 2) { if (!try_fill_recv(vi, GFP_ATOMIC)) - schedule_delayed_work(&vi->refill, 0); + queue_delayed_work(system_nrt_wq, &vi->refill, 0); } /* Out of packets? */ @@ -699,6 +707,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, } tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; @@ -719,6 +728,10 @@ static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); + /* Make sure we have some buffers: if oom use wq. */ + if (!try_fill_recv(vi, GFP_KERNEL)) + queue_delayed_work(system_nrt_wq, &vi->refill, 0); + virtnet_napi_enable(vi); return 0; } @@ -772,6 +785,8 @@ static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); + /* Make sure refill_work doesn't re-enable napi! 
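The virtio_net changes above route all refill retries through a non-reentrant workqueue: whenever try_fill_recv() cannot fully replenish the ring (OOM), the refill work is queued to run again later, and teardown cancels it before disabling NAPI. A sketch of that retry shape (hypothetical names; example_fill_ring is an assumed helper returning false when the ring is still short):

struct example_rx {
	struct delayed_work refill;
	/* ... ring state ... */
};

static bool example_fill_ring(struct example_rx *rx, gfp_t gfp);

static void example_try_refill(struct example_rx *rx)
{
	/* A non-reentrant workqueue (system_nrt_wq in this kernel) ensures
	 * the refill work never runs concurrently with itself; the owner
	 * must cancel_delayed_work_sync() before tearing the device down.
	 */
	if (!example_fill_ring(rx, GFP_ATOMIC))
		queue_delayed_work(system_nrt_wq, &rx->refill, HZ / 2);
}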
*/ + cancel_delayed_work_sync(&vi->refill); napi_disable(&vi->napi); return 0; @@ -853,7 +868,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) kfree(buf); } -static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) +static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; @@ -863,9 +878,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); + return 0; } -static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) +static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; @@ -875,6 +891,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); + return 0; } static void virtnet_get_ringparam(struct net_device *dev, @@ -889,7 +906,21 @@ static void virtnet_get_ringparam(struct net_device *dev, } + +static void virtnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct virtio_device *vdev = vi->vdev; + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); + +} + static const struct ethtool_ops virtnet_ethtool_ops = { + .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, }; @@ -1082,7 +1113,6 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); - cancel_delayed_work_sync(&vi->refill); free_vqs: vdev->config->del_vqs(vdev); free_stats: @@ -1121,9 +1151,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev) /* Stop all the virtqueues. */ vdev->config->reset(vdev); - unregister_netdev(vi->dev); - cancel_delayed_work_sync(&vi->refill); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d96bfb1ac20..de7fc345148 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) } -static void +static int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) } set_bit(vid, adapter->active_vlans); + + return 0; } -static void +static int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) } clear_bit(vid, adapter->active_vlans); + + return 0; } @@ -2163,7 +2167,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize); for (i = 0; i < rssConf->indTableSize; i++) - rssConf->indTable[i] = i % adapter->num_rx_queues; + rssConf->indTable[i] = ethtool_rxfh_indir_default( + i, adapter->num_rx_queues); devRead->rssConfDesc.confVer = 1; devRead->rssConfDesc.confLen = sizeof(*rssConf); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index e662cbc8bfb..587a218b234 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) struct vmxnet3_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); - drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, sizeof(drvinfo->version)); - drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0'; - - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0'; strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), ETHTOOL_BUSINFO_LEN); @@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) } } -int vmxnet3_set_features(struct net_device *netdev, u32 features) +int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; - u32 changed = features ^ netdev->features; + netdev_features_t changed = features ^ netdev->features; if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) { if (features & NETIF_F_RXCSUM) @@ -570,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, } #ifdef VMXNET3_RSS +static u32 +vmxnet3_get_rss_indir_size(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct UPT1_RSSConf *rssConf = adapter->rss_conf; + + return rssConf->indTableSize; +} + static int -vmxnet3_get_rss_indir(struct net_device *netdev, - struct ethtool_rxfh_indir *p) +vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; - unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize); + unsigned int n = rssConf->indTableSize; - p->size = rssConf->indTableSize; while (n--) - p->ring_index[n] = rssConf->indTable[n]; + p[n] = rssConf->indTable[n]; return 0; } static int 
-vmxnet3_set_rss_indir(struct net_device *netdev, - const struct ethtool_rxfh_indir *p) +vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; - if (p->size != rssConf->indTableSize) - return -EINVAL; - for (i = 0; i < rssConf->indTableSize; i++) { - /* - * Return with error code if any of the queue indices - * is out of range - */ - if (p->ring_index[i] < 0 || - p->ring_index[i] >= adapter->num_rx_queues) - return -EINVAL; - } - for (i = 0; i < rssConf->indTableSize; i++) - rssConf->indTable[i] = p->ring_index[i]; + rssConf->indTable[i] = p[i]; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, @@ -619,7 +608,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, } #endif -static struct ethtool_ops vmxnet3_ethtool_ops = { +static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, .get_regs_len = vmxnet3_get_regs_len, @@ -634,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = { .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, #ifdef VMXNET3_RSS + .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, .get_rxfh_indir = vmxnet3_get_rss_indir, .set_rxfh_indir = vmxnet3_set_rss_indir, #endif diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index b18eac1dcca..ed54797db19 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -401,7 +401,7 @@ void vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter); int -vmxnet3_set_features(struct net_device *netdev, u32 features); +vmxnet3_set_features(struct net_device *netdev, netdev_features_t features); int vmxnet3_create_queues(struct vmxnet3_adapter *adapter, diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 783168cce07..d43f4efd3e0 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -155,7 +155,7 @@ static int emancipate( struct net_device * ); static const char version[] = "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n"; -static int skip_pci_probe __initdata = 0; +static bool skip_pci_probe __initdata = false; static int scandone __initdata = 0; static int num __initdata = 0; diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 0b4fd05e150..4f774847898 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -362,7 +362,7 @@ static int io=0x238; static int txdma=1; static int rxdma=3; static int irq=5; -static int slow=0; +static bool slow=false; module_param(io, int, 0); MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c index 4b9ecb20dee..f20886ade1c 100644 --- a/drivers/net/wimax/i2400m/tx.c +++ b/drivers/net/wimax/i2400m/tx.c @@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m) { struct device *dev = i2400m_dev(i2400m); struct i2400m_msg_hdr *tx_msg; - bool try_head = 0; + bool try_head = false; BUG_ON(i2400m->tx_msg != NULL); /* * In certain situations, TX queue might have enough space to @@ -580,7 +580,7 @@ try_head: else if (tx_msg == TAIL_FULL) { i2400m_tx_skip_tail(i2400m); d_printf(2, dev, "new TX message: tail full, trying head\n"); - try_head = 1; + try_head = true; goto try_head; } memset(tx_msg, 0, I2400M_TX_PLD_SIZE); @@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, 
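The sbni and sealevel hunks above change variables registered with module_param(..., bool, ...) from int to bool, which is what the stricter module-parameter type checking expects. A hedged sketch of declaring such a parameter; example_feature and its description string are invented:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Loading the module with feature=1 (or =Y) sets the flag. */
static bool example_feature;
module_param_named(feature, example_feature, bool, S_IRUGO);
MODULE_PARM_DESC(feature, "Enable the (hypothetical) example feature");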
size_t buf_len, unsigned long flags; size_t padded_len; void *ptr; - bool try_head = 0; + bool try_head = false; unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM || pl_type == I2400M_PT_RESET_COLD; @@ -771,7 +771,7 @@ try_new: d_printf(2, dev, "pl append: tail full\n"); i2400m_tx_close(i2400m); i2400m_tx_skip_tail(i2400m); - try_head = 1; + try_head = true; goto try_new; } else if (ptr == NULL) { /* All full */ result = -ENOSPC; diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c index ac357acfb3e..99ef81b3d5a 100644 --- a/drivers/net/wimax/i2400m/usb-tx.c +++ b/drivers/net/wimax/i2400m/usb-tx.c @@ -177,7 +177,6 @@ retry: static int i2400mu_txd(void *_i2400mu) { - int result = 0; struct i2400mu *i2400mu = _i2400mu; struct i2400m *i2400m = &i2400mu->i2400m; struct device *dev = &i2400mu->usb_iface->dev; @@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu) /* Yeah, we ignore errors ... not much we can do */ i2400mu_tx(i2400mu, tx_msg, tx_msg_size); i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */ - if (result < 0) - break; } spin_lock_irqsave(&i2400m->tx_lock, flags); i2400mu->tx_kthread = NULL; spin_unlock_irqrestore(&i2400m->tx_lock, flags); - d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); - return result; + d_fnend(4, dev, "(i2400mu %p)\n", i2400mu); + return 0; } diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 0a304b060b6..98db76196b5 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o obj-$(CONFIG_MWL8K) += mwl8k.o obj-$(CONFIG_IWLWIFI) += iwlwifi/ -obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/ +obj-$(CONFIG_IWLEGACY) += iwlegacy/ obj-$(CONFIG_RT2X00) += rt2x00/ obj-$(CONFIG_P54_COMMON) += p54/ @@ -58,6 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/ obj-$(CONFIG_IWM) += iwmc3200wifi/ obj-$(CONFIG_MWIFIEX) += mwifiex/ -obj-$(CONFIG_BRCMFMAC) += brcm80211/ -obj-$(CONFIG_BRCMUMAC) += brcm80211/ -obj-$(CONFIG_BRCMSMAC) += brcm80211/ + +obj-$(CONFIG_BRCMFMAC) += brcm80211/ +obj-$(CONFIG_BRCMSMAC) += brcm80211/ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index ac1176a4f46..1c008c61b95 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ - emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload + emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload emmh32_final(&context->seed, (u8*)&mic->mic); /* New Type/length ?????????? 
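The airo change above replaces frame->da + ETH_ALEN * 2 with (u8 *)(frame + 1): both point at the byte following the two MAC addresses, but the second form does not index past the end of the da[] array. A hedged sketch of the idiom; example_hdr and example_payload are invented, and the layout is only assumed to mirror airo's etherHead:

#include <linux/if_ether.h>

struct example_hdr {
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
} __packed;

/* The payload begins immediately after the header. */
static u8 *example_payload(struct example_hdr *hdr)
{
	return (u8 *)(hdr + 1);
}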
*/ @@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 emmh32_update(&context->seed, eth->da, ETH_ALEN*2); emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap)); emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq)); - emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen); + emmh32_update(&context->seed, (u8 *)(eth + 1),payLen); //Calculate MIC emmh32_final(&context->seed, digest); diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile index d1214696a35..d716b748e57 100644 --- a/drivers/net/wireless/ath/Makefile +++ b/drivers/net/wireless/ath/Makefile @@ -11,3 +11,4 @@ ath-objs := main.o \ key.o ath-$(CONFIG_ATH_DEBUG) += debug.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index 0f9ee46cfc9..efc01110dc3 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -152,6 +152,7 @@ struct ath_common { struct ath_cycle_counters cc_survey; struct ath_regulatory regulatory; + struct ath_regulatory reg_world_copy; const struct ath_ops *ops; const struct ath_bus_ops *bus_ops; @@ -214,6 +215,10 @@ do { \ * @ATH_DBG_HWTIMER: hardware timer handling * @ATH_DBG_BTCOEX: bluetooth coexistance * @ATH_DBG_BSTUCK: stuck beacons + * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol + * used exclusively for WLAN-BT coexistence starting from + * AR9462. + * @ATH_DBG_DFS: radar datection * @ATH_DBG_ANY: enable all debugging * * The debug level is used to control the amount and type of debugging output @@ -239,6 +244,8 @@ enum ATH_DEBUG { ATH_DBG_BTCOEX = 0x00002000, ATH_DBG_WMI = 0x00004000, ATH_DBG_BSTUCK = 0x00008000, + ATH_DBG_MCI = 0x00010000, + ATH_DBG_DFS = 0x00020000, ATH_DBG_ANY = 0xffffffff }; @@ -248,7 +255,7 @@ enum ATH_DEBUG { #define ath_dbg(common, dbg_mask, fmt, ...) \ do { \ - if ((common)->debug_mask & dbg_mask) \ + if ((common)->debug_mask & ATH_DBG_##dbg_mask) \ _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \ } while (0) @@ -258,10 +265,13 @@ do { \ #else static inline __attribute__ ((format (printf, 3, 4))) -void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask, +void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask, const char *fmt, ...) { } +#define ath_dbg(common, dbg_mask, fmt, ...) \ + _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__) + #define ATH_DBG_WARN(foo, arg...) do {} while (0) #define ATH_DBG_WARN_ON_ONCE(foo) ({ \ int __ret_warn_once = !!(foo); \ diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index e5be7e70181..ee7ea572b06 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev) if (to_platform_device(ah->dev)->id == 0 && (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == (BD_WLAN1 | BD_WLAN0)) - __set_bit(ATH_STAT_2G_DISABLED, ah->status); + ah->ah_capabilities.cap_needs_2GHz_ovr = true; + else + ah->ah_capabilities.cap_needs_2GHz_ovr = false; } ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index bea90e6be70..bf674161a21 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -27,15 +27,21 @@ * or reducing sensitivity as necessary. 
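The ath_dbg() change above pastes the ATH_DBG_ prefix onto the mask argument inside the macro, so call sites pass the short category name (e.g. CALIBRATE) rather than the full enumerator. A hedged sketch of the same token-pasting pattern; example_debug, example_common and example_dbg are invented:

#include <linux/printk.h>

enum example_debug {
	EXAMPLE_DBG_RESET	= 0x0001,
	EXAMPLE_DBG_CALIBRATE	= 0x0002,
	EXAMPLE_DBG_ANY		= 0xffff,
};

struct example_common {
	unsigned int debug_mask;
};

#define example_dbg(common, dbg_mask, fmt, ...)				\
do {									\
	if ((common)->debug_mask & EXAMPLE_DBG_##dbg_mask)		\
		pr_debug(fmt, ##__VA_ARGS__);				\
} while (0)

/* usage: example_dbg(common, RESET, "resetting, status 0x%x\n", status); */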
* * The parameters are: + * * - "noise immunity" + * * - "spur immunity" + * * - "firstep level" + * * - "OFDM weak signal detection" + * * - "CCK weak signal detection" * * Basically we look at the amount of ODFM and CCK timing errors we get and then * raise or lower immunity accordingly by setting one or more of these * parameters. + * * Newer chipsets have PHY error counters in hardware which will generate a MIB * interrupt when they overflow. Older hardware has too enable PHY error frames * by setting a RX flag and then count every single PHY error. When a specified @@ -45,11 +51,13 @@ */ -/*** ANI parameter control ***/ +/***********************\ +* ANI parameter control * +\***********************/ /** * ath5k_ani_set_noise_immunity_level() - Set noise immunity level - * + * @ah: The &struct ath5k_hw * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL */ void @@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level) ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); } - /** * ath5k_ani_set_spur_immunity_level() - Set spur immunity level - * + * @ah: The &struct ath5k_hw * @level: level between 0 and @max_spur_level (the maximum level is dependent - * on the chip revision). + * on the chip revision). */ void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) @@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); } - /** * ath5k_ani_set_firstep_level() - Set "firstep" level - * + * @ah: The &struct ath5k_hw * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL */ void @@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level) ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); } - /** - * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal - * detection - * + * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection + * @ah: The &struct ath5k_hw * @on: turn on or off */ void @@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on) on ? "on" : "off"); } - /** - * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection - * + * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection + * @ah: The &struct ath5k_hw * @on: turn on or off */ void @@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on) } -/*** ANI algorithm ***/ +/***************\ +* ANI algorithm * +\***************/ /** * ath5k_ani_raise_immunity() - Increase noise immunity - * + * @ah: The &struct ath5k_hw + * @as: The &struct ath5k_ani_state * @ofdm_trigger: If this is true we are called because of too many OFDM errors, - * the algorithm will tune more parameters then. + * the algorithm will tune more parameters then. * * Try to raise noise immunity (=decrease sensitivity) in several steps * depending on the average RSSI of the beacons we received. @@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, */ } - /** * ath5k_ani_lower_immunity() - Decrease noise immunity + * @ah: The &struct ath5k_hw + * @as: The &struct ath5k_ani_state * * Try to lower noise immunity (=increase sensitivity) in several steps * depending on the average RSSI of the beacons we received. 
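Most of the ani.c hunks above only complete the kernel-doc comments by adding the missing @ah: lines. For reference, a minimal sketch of the layout these fixes converge on; example_set_level is an invented stub:

/**
 * example_set_level() - One-line summary, function name plus ()
 * @ah: The &struct ath5k_hw
 * @level: level between 0 and some chip-dependent maximum
 *
 * Free-form description after a blank comment line; every parameter
 * above gets its own @name: line.
 */
static void example_set_level(struct ath5k_hw *ah, int level)
{
	/* body omitted in this sketch */
}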
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as) } } - /** * ath5k_hw_ani_get_listen_time() - Update counters and return listening time + * @ah: The &struct ath5k_hw + * @as: The &struct ath5k_ani_state * * Return an approximation of the time spent "listening" in milliseconds (ms) * since the last call of this function. @@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as) return listen; } - /** * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters + * @ah: The &struct ath5k_hw + * @as: The &struct ath5k_ani_state * * Clear the PHY error counters as soon as possible, since this might be called * from a MIB interrupt and we want to make sure we don't get interrupted again. @@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah, return 1; } - /** * ath5k_ani_period_restart() - Restart ANI period + * @as: The &struct ath5k_ani_state * * Just reset counters, so they are clear for the next "ani period". */ static void -ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as) +ath5k_ani_period_restart(struct ath5k_ani_state *as) { /* keep last values for debugging */ as->last_ofdm_errors = as->ofdm_errors; @@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as) as->listen_time = 0; } - /** * ath5k_ani_calibration() - The main ANI calibration function + * @ah: The &struct ath5k_hw * * We count OFDM and CCK errors relative to the time where we did not send or * receive ("listen" time) and raise or lower immunity accordingly. @@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah) /* too many PHY errors - we have to raise immunity */ bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false; ath5k_ani_raise_immunity(ah, as, ofdm_flag); - ath5k_ani_period_restart(ah, as); + ath5k_ani_period_restart(as); } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) { /* If more than 5 (TODO: why 5?) periods have passed and we got @@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah) if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low) ath5k_ani_lower_immunity(ah, as); - ath5k_ani_period_restart(ah, as); + ath5k_ani_period_restart(as); } } -/*** INTERRUPT HANDLER ***/ +/*******************\ +* Interrupt handler * +\*******************/ /** * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters + * @ah: The &struct ath5k_hw * * Just read & reset the registers quickly, so they don't generate more * interrupts, save the counters and schedule the tasklet to decide whether @@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah) tasklet_schedule(&ah->ani_tasklet); } - /** - * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors + * ath5k_ani_phy_error_report - Used by older HW to report PHY errors + * + * @ah: The &struct ath5k_hw + * @phyerr: One of enum ath5k_phy_error_code * * This is used by hardware without PHY error counters to report PHY errors * on a frame-by-frame basis, instead of the interrupt. @@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah, } -/*** INIT ***/ +/****************\ +* Initialization * +\****************/ /** * ath5k_enable_phy_err_counters() - Enable PHY error counters + * @ah: The &struct ath5k_hw * * Enable PHY error counters for OFDM and CCK timing errors. 
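The calibration kernel-doc above says OFDM and CCK errors are counted relative to the listen time before immunity is raised or lowered. A loose, hedged sketch of that decision; example_ani_decide() and the threshold values are invented, only the idea of judging error counts against listen-time-scaled thresholds is taken from the text:

#define EXAMPLE_OFDM_TRIG_HIGH	500	/* errors per second, illustrative */
#define EXAMPLE_OFDM_TRIG_LOW	200

static int example_ani_decide(unsigned int ofdm_errors, int listen_time_ms)
{
	if (ofdm_errors > listen_time_ms * EXAMPLE_OFDM_TRIG_HIGH / 1000)
		return 1;	/* raise immunity */
	if (ofdm_errors <= listen_time_ms * EXAMPLE_OFDM_TRIG_LOW / 1000)
		return -1;	/* lower immunity */
	return 0;		/* keep the current level */
}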
*/ @@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah) ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT); } - /** * ath5k_disable_phy_err_counters() - Disable PHY error counters + * @ah: The &struct ath5k_hw * * Disable PHY error counters for OFDM and CCK timing errors. */ @@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah) ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT); } - /** * ath5k_ani_init() - Initialize ANI - * @mode: Which mode to use (auto, manual high, manual low, off) + * @ah: The &struct ath5k_hw + * @mode: One of enum ath5k_ani_mode * * Initialize ANI according to mode. */ @@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode) } -/*** DEBUG ***/ +/**************\ +* Debug output * +\**************/ #ifdef CONFIG_ATH5K_DEBUG +/** + * ath5k_ani_print_counters() - Print ANI counters + * @ah: The &struct ath5k_hw + * + * Used for debugging ANI + */ void ath5k_ani_print_counters(struct ath5k_hw *ah) { diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h index 7358b6c83c6..21aa355460b 100644 --- a/drivers/net/wireless/ath/ath5k/ani.h +++ b/drivers/net/wireless/ath/ath5k/ani.h @@ -40,13 +40,13 @@ enum ath5k_phy_error_code; * enum ath5k_ani_mode - mode for ANI / noise sensitivity * * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI - * algorithm after it has been on auto mode. - * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low, - * maximizing sensitivity. ANI will not run. - * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high, - * minimizing sensitivity. ANI will not run. - * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the - * amount of OFDM and CCK frame errors (default). + * algorithm after it has been on auto mode. + * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low, + * maximizing sensitivity. ANI will not run. + * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high, + * minimizing sensitivity. ANI will not run. + * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the + * amount of OFDM and CCK frame errors (default). 
*/ enum ath5k_ani_mode { ATH5K_ANI_MODE_OFF = 0, @@ -58,8 +58,22 @@ enum ath5k_ani_mode { /** * struct ath5k_ani_state - ANI state and associated counters - * - * @max_spur_level: the maximum spur level is chip dependent + * @ani_mode: One of enum ath5k_ani_mode + * @noise_imm_level: Noise immunity level + * @spur_level: Spur immunity level + * @firstep_level: FIRstep level + * @ofdm_weak_sig: OFDM weak signal detection state (on/off) + * @cck_weak_sig: CCK weak signal detection state (on/off) + * @max_spur_level: Max spur immunity level (chip specific) + * @listen_time: Listen time + * @ofdm_errors: OFDM timing error count + * @cck_errors: CCK timing error count + * @last_cc: The &struct ath_cycle_counters (for stats) + * @last_listen: Listen time from previous run (for stats) + * @last_ofdm_errors: OFDM timing error count from previous run (for tats) + * @last_cck_errors: CCK timing error count from previous run (for stats) + * @sum_ofdm_errors: Sum of OFDM timing errors (for stats) + * @sum_cck_errors: Sum of all CCK timing errors (for stats) */ struct ath5k_ani_state { enum ath5k_ani_mode ani_mode; diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index fecbcd9a425..c2b2518c2ec 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -187,10 +187,9 @@ #define AR5K_TUNE_MAX_TXPOWER 63 #define AR5K_TUNE_DEFAULT_TXPOWER 25 #define AR5K_TUNE_TPC_TXPOWER false -#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */ +#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */ +#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */ #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ -#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */ - #define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */ #define AR5K_INIT_CARR_SENSE_EN 1 @@ -262,16 +261,34 @@ #define AR5K_AGC_SETTLING_TURBO 37 -/* GENERIC CHIPSET DEFINITIONS */ -/* MAC Chips */ +/*****************************\ +* GENERIC CHIPSET DEFINITIONS * +\*****************************/ + +/** + * enum ath5k_version - MAC Chips + * @AR5K_AR5210: AR5210 (Crete) + * @AR5K_AR5211: AR5211 (Oahu/Maui) + * @AR5K_AR5212: AR5212 (Venice) and newer + */ enum ath5k_version { AR5K_AR5210 = 0, AR5K_AR5211 = 1, AR5K_AR5212 = 2, }; -/* PHY Chips */ +/** + * enum ath5k_radio - PHY Chips + * @AR5K_RF5110: RF5110 (Fez) + * @AR5K_RF5111: RF5111 (Sombrero) + * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2) + * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite) + * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor) + * @AR5K_RF2316: RF2315/2316 (Cobra SoC) + * @AR5K_RF2317: RF2317 (Spider SoC) + * @AR5K_RF2425: RF2425/2417 (Swan/Nalla) + */ enum ath5k_radio { AR5K_RF5110 = 0, AR5K_RF5111 = 1, @@ -303,11 +320,11 @@ enum ath5k_radio { #define AR5K_SREV_AR5213A 0x59 /* Hainan */ #define AR5K_SREV_AR2413 0x78 /* Griffin lite */ #define AR5K_SREV_AR2414 0x70 /* Griffin */ -#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */ -#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */ +#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */ +#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */ #define AR5K_SREV_AR5424 0x90 /* Condor */ -#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */ -#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */ +#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */ +#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */ #define AR5K_SREV_AR5413 0xa4 /* Eagle lite */ #define AR5K_SREV_AR5414 0xa0 /* Eagle */ #define AR5K_SREV_AR2415 0xb0 /* Talon */ @@ -344,32 +361,40 @@ 
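The retuned intervals above (60 s full, 10 s short, 1 s ANI) are driven by jiffies deadlines in the calibration poll further down. A hedged sketch of that bookkeeping; example_interval_due is an invented helper:

#include <linux/jiffies.h>

/* Returns true and re-arms the deadline when the interval has expired. */
static bool example_interval_due(unsigned long *deadline, unsigned int msecs)
{
	if (!time_is_before_eq_jiffies(*deadline))
		return false;

	*deadline = jiffies + msecs_to_jiffies(msecs);
	return true;
}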
enum ath5k_radio { /* TODO add support to mac80211 for vendor-specific rates and modes */ -/* +/** + * DOC: Atheros XR + * * Some of this information is based on Documentation from: * * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG * - * Modulation for Atheros' eXtended Range - range enhancing extension that is - * supposed to double the distance an Atheros client device can keep a - * connection with an Atheros access point. This is achieved by increasing - * the receiver sensitivity up to, -105dBm, which is about 20dB above what - * the 802.11 specifications demand. In addition, new (proprietary) data rates - * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s. + * Atheros' eXtended Range - range enhancing extension is a modulation scheme + * that is supposed to double the link distance between an Atheros XR-enabled + * client device with an Atheros XR-enabled access point. This is achieved + * by increasing the receiver sensitivity up to, -105dBm, which is about 20dB + * above what the 802.11 specifications demand. In addition, new (proprietary) + * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s. * * Please note that can you either use XR or TURBO but you cannot use both, * they are exclusive. * + * Also note that we do not plan to support XR mode at least for now. You can + * get a mode similar to XR by using 5MHz bwmode. */ -#define MODULATION_XR 0x00000200 -/* - * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a - * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s - * signaling rate achieved through the bonding of two 54Mbit/s 802.11g - * channels. To use this feature your Access Point must also support it. + + +/** + * DOC: Atheros SuperAG + * + * In addition to XR we have another modulation scheme called TURBO mode + * that is supposed to provide a throughput transmission speed up to 40Mbit/s + * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two + * 54Mbit/s 802.11g channels. To use this feature both ends must support it. * There is also a distinction between "static" and "dynamic" turbo modes: * * - Static: is the dumb version: devices set to this mode stick to it until * the mode is turned off. + * * - Dynamic: is the intelligent version, the network decides itself if it * is ok to use turbo. As soon as traffic is detected on adjacent channels * (which would get used in turbo mode), or when a non-turbo station joins @@ -383,24 +408,39 @@ enum ath5k_radio { * * http://www.pcworld.com/article/id,113428-page,1/article.html * - * The channel bonding seems to be driver specific though. In addition to - * deciding what channels will be used, these "Turbo" modes are accomplished - * by also enabling the following features: + * The channel bonding seems to be driver specific though. + * + * In addition to TURBO modes we also have the following features for even + * greater speed-up: * * - Bursting: allows multiple frames to be sent at once, rather than pausing * after each frame. Bursting is a standards-compliant feature that can be * used with any Access Point. + * * - Fast frames: increases the amount of information that can be sent per * frame, also resulting in a reduction of transmission overhead. It is a * proprietary feature that needs to be supported by the Access Point. + * * - Compression: data frames are compressed in real time using a Lempel Ziv * algorithm. This is done transparently. 
Once this feature is enabled, * compression and decompression takes place inside the chipset, without * putting additional load on the host CPU. * + * As with XR we also don't plan to support SuperAG features for now. You can + * get a mode similar to TURBO by using 40MHz bwmode. */ -#define MODULATION_TURBO 0x00000080 + +/** + * enum ath5k_driver_mode - PHY operation mode + * @AR5K_MODE_11A: 802.11a + * @AR5K_MODE_11B: 802.11b + * @AR5K_MODE_11G: 801.11g + * @AR5K_MODE_MAX: Used for boundary checks + * + * Do not change the order here, we use these as + * array indices and it also maps EEPROM structures. + */ enum ath5k_driver_mode { AR5K_MODE_11A = 0, AR5K_MODE_11B = 1, @@ -408,30 +448,64 @@ enum ath5k_driver_mode { AR5K_MODE_MAX = 3 }; +/** + * enum ath5k_ant_mode - Antenna operation mode + * @AR5K_ANTMODE_DEFAULT: Default antenna setup + * @AR5K_ANTMODE_FIXED_A: Only antenna A is present + * @AR5K_ANTMODE_FIXED_B: Only antenna B is present + * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap + * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc + * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc + * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx- + * @AR5K_ANTMODE_MAX: Used for boundary checks + * + * For more infos on antenna control check out phy.c + */ enum ath5k_ant_mode { - AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */ - AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */ - AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */ - AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */ - AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */ - AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */ - AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */ + AR5K_ANTMODE_DEFAULT = 0, + AR5K_ANTMODE_FIXED_A = 1, + AR5K_ANTMODE_FIXED_B = 2, + AR5K_ANTMODE_SINGLE_AP = 3, + AR5K_ANTMODE_SECTOR_AP = 4, + AR5K_ANTMODE_SECTOR_STA = 5, + AR5K_ANTMODE_DEBUG = 6, AR5K_ANTMODE_MAX, }; +/** + * enum ath5k_bw_mode - Bandwidth operation mode + * @AR5K_BWMODE_DEFAULT: 20MHz, default operation + * @AR5K_BWMODE_5MHZ: Quarter rate + * @AR5K_BWMODE_10MHZ: Half rate + * @AR5K_BWMODE_40MHZ: Turbo + */ enum ath5k_bw_mode { - AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */ - AR5K_BWMODE_5MHZ = 1, /* Quarter rate */ - AR5K_BWMODE_10MHZ = 2, /* Half rate */ - AR5K_BWMODE_40MHZ = 3 /* Turbo */ + AR5K_BWMODE_DEFAULT = 0, + AR5K_BWMODE_5MHZ = 1, + AR5K_BWMODE_10MHZ = 2, + AR5K_BWMODE_40MHZ = 3 }; + + /****************\ TX DEFINITIONS \****************/ -/* - * TX Status descriptor +/** + * struct ath5k_tx_status - TX Status descriptor + * @ts_seqnum: Sequence number + * @ts_tstamp: Timestamp + * @ts_status: Status code + * @ts_final_idx: Final transmission series index + * @ts_final_retry: Final retry count + * @ts_rssi: RSSI for received ACK + * @ts_shortretry: Short retry count + * @ts_virtcol: Virtual collision count + * @ts_antenna: Antenna used + * + * TX status descriptor gets filled by the hw + * on each transmission attempt. */ struct ath5k_tx_status { u16 ts_seqnum; @@ -454,7 +528,6 @@ struct ath5k_tx_status { * enum ath5k_tx_queue - Queue types used to classify tx queues. 
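The driver-mode kernel-doc above stresses that the enumerators double as array indices. A hedged sketch of a per-mode table keyed on them; example_mode_name and its strings are invented:

static const char * const example_mode_name[AR5K_MODE_MAX] = {
	[AR5K_MODE_11A] = "802.11a",
	[AR5K_MODE_11B] = "802.11b",
	[AR5K_MODE_11G] = "802.11g",
};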
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue * @AR5K_TX_QUEUE_DATA: A normal data queue - * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue * @AR5K_TX_QUEUE_BEACON: The beacon queue * @AR5K_TX_QUEUE_CAB: The after-beacon queue * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue @@ -462,7 +535,6 @@ struct ath5k_tx_status { enum ath5k_tx_queue { AR5K_TX_QUEUE_INACTIVE = 0, AR5K_TX_QUEUE_DATA, - AR5K_TX_QUEUE_XR_DATA, AR5K_TX_QUEUE_BEACON, AR5K_TX_QUEUE_CAB, AR5K_TX_QUEUE_UAPSD, @@ -471,36 +543,46 @@ enum ath5k_tx_queue { #define AR5K_NUM_TX_QUEUES 10 #define AR5K_NUM_TX_QUEUES_NOQCU 2 -/* - * Queue syb-types to classify normal data queues. +/** + * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues + * @AR5K_WME_AC_BK: Background traffic + * @AR5K_WME_AC_BE: Best-effort (normal) traffic + * @AR5K_WME_AC_VI: Video traffic + * @AR5K_WME_AC_VO: Voice traffic + * * These are the 4 Access Categories as defined in * WME spec. 0 is the lowest priority and 4 is the * highest. Normal data that hasn't been classified * goes to the Best Effort AC. */ enum ath5k_tx_queue_subtype { - AR5K_WME_AC_BK = 0, /*Background traffic*/ - AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/ - AR5K_WME_AC_VI, /*Video traffic*/ - AR5K_WME_AC_VO, /*Voice traffic*/ + AR5K_WME_AC_BK = 0, + AR5K_WME_AC_BE, + AR5K_WME_AC_VI, + AR5K_WME_AC_VO, }; -/* - * Queue ID numbers as returned by the hw functions, each number - * represents a hw queue. If hw does not support hw queues - * (eg 5210) all data goes in one queue. These match - * d80211 definitions (net80211/MadWiFi don't use them). +/** + * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions + * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available) + * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available) + * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index + * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index + * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue + * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue + * @AR5K_TX_QUEUE_ID_UAPSD: Urgent Automatic Power Save Delivery, + * + * Each number represents a hw queue. If hw does not support hw queues + * (eg 5210) all data goes in one queue. */ enum ath5k_tx_queue_id { AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, - AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ - AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/ - AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ - AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ - AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/ - AR5K_TX_QUEUE_ID_UAPSD = 8, - AR5K_TX_QUEUE_ID_XR_DATA = 9, + AR5K_TX_QUEUE_ID_DATA_MIN = 0, + AR5K_TX_QUEUE_ID_DATA_MAX = 3, + AR5K_TX_QUEUE_ID_UAPSD = 7, + AR5K_TX_QUEUE_ID_CAB = 8, + AR5K_TX_QUEUE_ID_BEACON = 9, }; /* @@ -521,46 +603,70 @@ enum ath5k_tx_queue_id { #define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */ #define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/ -/* - * Data transmit queue state. One of these exists for each - * hardware transmit queue. Packets sent to us from above - * are assigned to queues based on their priority. Not all - * devices support a complete set of hardware transmit queues. - * For those devices the array sc_ac2q will map multiple - * priorities to fewer hardware queues (typically all to one - * hardware queue). 
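The four WME access categories documented above follow the usual 802.1d user-priority classification. A hedged sketch of that mapping as a lookup table keyed by skb->priority (0..7); example_prio_to_ac is invented and reflects the standard mapping, not code from this patch:

static const enum ath5k_tx_queue_subtype example_prio_to_ac[8] = {
	[0] = AR5K_WME_AC_BE, [1] = AR5K_WME_AC_BK,
	[2] = AR5K_WME_AC_BK, [3] = AR5K_WME_AC_BE,
	[4] = AR5K_WME_AC_VI, [5] = AR5K_WME_AC_VI,
	[6] = AR5K_WME_AC_VO, [7] = AR5K_WME_AC_VO,
};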
+/** + * struct ath5k_txq - Transmit queue state + * @qnum: Hardware q number + * @link: Link ptr in last TX desc + * @q: Transmit queue (&struct list_head) + * @lock: Lock on q and link + * @setup: Is the queue configured + * @txq_len:Number of queued buffers + * @txq_max: Max allowed num of queued buffers + * @txq_poll_mark: Used to check if queue got stuck + * @txq_stuck: Queue stuck counter + * + * One of these exists for each hardware transmit queue. + * Packets sent to us from above are assigned to queues based + * on their priority. Not all devices support a complete set + * of hardware transmit queues. For those devices the array + * sc_ac2q will map multiple priorities to fewer hardware queues + * (typically all to one hardware queue). */ struct ath5k_txq { - unsigned int qnum; /* hardware q number */ - u32 *link; /* link ptr in last TX desc */ - struct list_head q; /* transmit queue */ - spinlock_t lock; /* lock on q and link */ + unsigned int qnum; + u32 *link; + struct list_head q; + spinlock_t lock; bool setup; - int txq_len; /* number of queued buffers */ - int txq_max; /* max allowed num of queued buffers */ + int txq_len; + int txq_max; bool txq_poll_mark; - unsigned int txq_stuck; /* informational counter */ + unsigned int txq_stuck; }; -/* - * A struct to hold tx queue's parameters +/** + * struct ath5k_txq_info - A struct to hold TX queue's parameters + * @tqi_type: One of enum ath5k_tx_queue + * @tqi_subtype: One of enum ath5k_tx_queue_subtype + * @tqi_flags: TX queue flags (see above) + * @tqi_aifs: Arbitrated Inter-frame Space + * @tqi_cw_min: Minimum Contention Window + * @tqi_cw_max: Maximum Contention Window + * @tqi_cbr_period: Constant bit rate period + * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled */ struct ath5k_txq_info { enum ath5k_tx_queue tqi_type; enum ath5k_tx_queue_subtype tqi_subtype; - u16 tqi_flags; /* Tx queue flags (see above) */ - u8 tqi_aifs; /* Arbitrated Interframe Space */ - u16 tqi_cw_min; /* Minimum Contention Window */ - u16 tqi_cw_max; /* Maximum Contention Window */ - u32 tqi_cbr_period; /* Constant bit rate period */ + u16 tqi_flags; + u8 tqi_aifs; + u16 tqi_cw_min; + u16 tqi_cw_max; + u32 tqi_cbr_period; u32 tqi_cbr_overflow_limit; u32 tqi_burst_time; - u32 tqi_ready_time; /* Time queue waits after an event */ + u32 tqi_ready_time; }; -/* - * Transmit packet types. 
- * used on tx control descriptor +/** + * enum ath5k_pkt_type - Transmit packet types + * @AR5K_PKT_TYPE_NORMAL: Normal data + * @AR5K_PKT_TYPE_ATIM: ATIM + * @AR5K_PKT_TYPE_PSPOLL: PS-Poll + * @AR5K_PKT_TYPE_BEACON: Beacon + * @AR5K_PKT_TYPE_PROBE_RESP: Probe response + * @AR5K_PKT_TYPE_PIFS: PIFS + * Used on tx control descriptor */ enum ath5k_pkt_type { AR5K_PKT_TYPE_NORMAL = 0, @@ -583,27 +689,23 @@ enum ath5k_pkt_type { (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \ ) -/* - * DMA size definitions (2^(n+2)) - */ -enum ath5k_dmasize { - AR5K_DMASIZE_4B = 0, - AR5K_DMASIZE_8B, - AR5K_DMASIZE_16B, - AR5K_DMASIZE_32B, - AR5K_DMASIZE_64B, - AR5K_DMASIZE_128B, - AR5K_DMASIZE_256B, - AR5K_DMASIZE_512B -}; /****************\ RX DEFINITIONS \****************/ -/* - * RX Status descriptor +/** + * struct ath5k_rx_status - RX Status descriptor + * @rs_datalen: Data length + * @rs_tstamp: Timestamp + * @rs_status: Status code + * @rs_phyerr: PHY error mask + * @rs_rssi: RSSI in 0.5dbm units + * @rs_keyix: Index to the key used for decrypting + * @rs_rate: Rate used to decode the frame + * @rs_antenna: Antenna used to receive the frame + * @rs_more: Indicates this is a frame fragment (Fast frames) */ struct ath5k_rx_status { u16 rs_datalen; @@ -645,10 +747,18 @@ struct ath5k_rx_status { #define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) + /*******************************\ GAIN OPTIMIZATION DEFINITIONS \*******************************/ +/** + * enum ath5k_rfgain - RF Gain optimization engine state + * @AR5K_RFGAIN_INACTIVE: Engine disabled + * @AR5K_RFGAIN_ACTIVE: Probe active + * @AR5K_RFGAIN_READ_REQUESTED: Probe requested + * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change + */ enum ath5k_rfgain { AR5K_RFGAIN_INACTIVE = 0, AR5K_RFGAIN_ACTIVE, @@ -656,6 +766,16 @@ enum ath5k_rfgain { AR5K_RFGAIN_NEED_CHANGE, }; +/** + * struct ath5k_gain - RF Gain optimization engine state data + * @g_step_idx: Current step index + * @g_current: Current gain + * @g_target: Target gain + * @g_low: Low gain boundary + * @g_high: High gain boundary + * @g_f_corr: Gain_F correction + * @g_state: One of enum ath5k_rfgain + */ struct ath5k_gain { u8 g_step_idx; u8 g_current; @@ -666,6 +786,8 @@ struct ath5k_gain { u8 g_state; }; + + /********************\ COMMON DEFINITIONS \********************/ @@ -674,9 +796,14 @@ struct ath5k_gain { #define AR5K_SLOT_TIME_20 880 #define AR5K_SLOT_TIME_MAX 0xffff -/* - * The following structure is used to map 2GHz channels to - * 5GHz Atheros channels. +/** + * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111 + * @a2_flags: Channel flags (internal) + * @a2_athchan: HW channel number (internal) + * + * This structure is used to map 2GHz channels to + * 5GHz Atheros channels on 2111 frequency converter + * that comes together with RF5111 * TODO: Clean up */ struct ath5k_athchan_2ghz { @@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz { u16 a2_athchan; }; +/** + * enum ath5k_dmasize - DMA size definitions (2^(n+2)) + * @AR5K_DMASIZE_4B: 4Bytes + * @AR5K_DMASIZE_8B: 8Bytes + * @AR5K_DMASIZE_16B: 16Bytes + * @AR5K_DMASIZE_32B: 32Bytes + * @AR5K_DMASIZE_64B: 64Bytes (Default) + * @AR5K_DMASIZE_128B: 128Bytes + * @AR5K_DMASIZE_256B: 256Bytes + * @AR5K_DMASIZE_512B: 512Bytes + * + * These are used to set DMA burst size on hw + * + * Note: Some platforms can't handle more than 4Bytes + * be careful on embedded boards. 
+ */ +enum ath5k_dmasize { + AR5K_DMASIZE_4B = 0, + AR5K_DMASIZE_8B, + AR5K_DMASIZE_16B, + AR5K_DMASIZE_32B, + AR5K_DMASIZE_64B, + AR5K_DMASIZE_128B, + AR5K_DMASIZE_256B, + AR5K_DMASIZE_512B +}; + + /******************\ RATE DEFINITIONS \******************/ /** + * DOC: Rate codes + * * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32. * * The rate code is used to get the RX rate or set the TX rate on the * hardware descriptors. It is also used for internal modulation control * and settings. * - * This is the hardware rate map we are aware of: - * - * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 - * rate_kbps 3000 1000 ? ? ? 2000 500 48000 - * - * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10 - * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ? + * This is the hardware rate map we are aware of (html unfriendly): * - * rate_code 17 18 19 20 21 22 23 24 - * rate_kbps ? ? ? ? ? ? ? 11000 + * Rate code Rate (Kbps) + * --------- ----------- + * 0x01 3000 (XR) + * 0x02 1000 (XR) + * 0x03 250 (XR) + * 0x04 - 05 -Reserved- + * 0x06 2000 (XR) + * 0x07 500 (XR) + * 0x08 48000 (OFDM) + * 0x09 24000 (OFDM) + * 0x0A 12000 (OFDM) + * 0x0B 6000 (OFDM) + * 0x0C 54000 (OFDM) + * 0x0D 36000 (OFDM) + * 0x0E 18000 (OFDM) + * 0x0F 9000 (OFDM) + * 0x10 - 17 -Reserved- + * 0x18 11000L (CCK) + * 0x19 5500L (CCK) + * 0x1A 2000L (CCK) + * 0x1B 1000L (CCK) + * 0x1C 11000S (CCK) + * 0x1D 5500S (CCK) + * 0x1E 2000S (CCK) + * 0x1F -Reserved- * - * rate_code 25 26 27 28 29 30 31 32 - * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ? - * - * "S" indicates CCK rates with short preamble. + * "S" indicates CCK rates with short preamble and "L" with long preamble. * * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the - * lowest 4 bits, so they are the same as below with a 0xF mask. + * lowest 4 bits, so they are the same as above with a 0xF mask. * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M). * We handle this in ath5k_setup_bands(). */ @@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz { #define ATH5K_RATE_CODE_36M 0x0D #define ATH5K_RATE_CODE_48M 0x08 #define ATH5K_RATE_CODE_54M 0x0C -/* XR */ -#define ATH5K_RATE_CODE_XR_500K 0x07 -#define ATH5K_RATE_CODE_XR_1M 0x02 -#define ATH5K_RATE_CODE_XR_2M 0x06 -#define ATH5K_RATE_CODE_XR_3M 0x01 -/* adding this flag to rate_code enables short preamble */ +/* Adding this flag to rate_code on B rates + * enables short preamble */ #define AR5K_SET_SHORT_PREAMBLE 0x04 /* @@ -747,7 +914,7 @@ struct ath5k_athchan_2ghz { */ #define AR5K_KEYCACHE_SIZE 8 -extern int ath5k_modparam_nohwcrypt; +extern bool ath5k_modparam_nohwcrypt; /***********************\ HW RELATED DEFINITIONS @@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt; /** * enum ath5k_int - Hardware interrupt masks helpers + * @AR5K_INT_RXOK: Frame successfully received + * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor + * @AR5K_INT_RXERR: Frame reception failed + * @AR5K_INT_RXNOFRM: No frame received within a specified time period + * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors + * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is + * not always fatal, on some chips we can continue operation + * without resetting the card, that's why %AR5K_INT_FATAL is not + * common for all chips. 
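Per the rate table above, each short-preamble CCK code differs from its long-preamble counterpart exactly by the AR5K_SET_SHORT_PREAMBLE bit (0x18 | 0x04 = 0x1C for 11M). A hedged sketch; example_cck_rate_code is an invented helper:

static inline u8 example_cck_rate_code(u8 long_preamble_code,
				       bool short_preamble)
{
	return short_preamble ?
		(long_preamble_code | AR5K_SET_SHORT_PREAMBLE) :
		long_preamble_code;
}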
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts + * + * @AR5K_INT_TXOK: Frame transmission success + * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor + * @AR5K_INT_TXERR: Frame transmission failure + * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The + * Queue Control Unit (QCU) signals an EOL interrupt only if a + * descriptor's LinkPtr is NULL. For more details, refer to: + * "http://www.freepatentsonline.com/20030225739.html" + * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period + * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should + * increase the TX trigger threshold. + * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts * - * @AR5K_INT_RX: mask to identify received frame interrupts, of type - * AR5K_ISR_RXOK or AR5K_ISR_RXERR - * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?) - * @AR5K_INT_RXNOFRM: No frame received (?) - * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The - * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's - * LinkPtr is NULL. For more details, refer to: - * http://www.freepatentsonline.com/20030225739.html - * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors). - * Note that Rx overrun is not always fatal, on some chips we can continue - * operation without resetting the card, that's why int_fatal is not - * common for all chips. - * @AR5K_INT_TX: mask to identify received frame interrupts, of type - * AR5K_ISR_TXOK or AR5K_ISR_TXERR - * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?) - * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold - * We currently do increments on interrupt by - * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2 * @AR5K_INT_MIB: Indicates the either Management Information Base counters or - * one of the PHY error counters reached the maximum value and should be - * read and cleared. + * one of the PHY error counters reached the maximum value and + * should be read and cleared. + * @AR5K_INT_SWI: Software triggered interrupt. * @AR5K_INT_RXPHY: RX PHY Error * @AR5K_INT_RXKCM: RX Key cache miss * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a - * beacon that must be handled in software. The alternative is if you - * have VEOL support, in that case you let the hardware deal with things. + * beacon that must be handled in software. The alternative is if + * you have VEOL support, in that case you let the hardware deal + * with things. + * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing - * beacons from the AP have associated with, we should probably try to - * reassociate. When in IBSS mode this might mean we have not received - * any beacons from any local stations. Note that every station in an - * IBSS schedules to send beacons at the Target Beacon Transmission Time - * (TBTT) with a random backoff. - * @AR5K_INT_BNR: Beacon Not Ready interrupt - ?? - * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now - * until properly handled - * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA - * errors. These types of errors we can enable seem to be of type - * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR. + * beacons from the AP have associated with, we should probably + * try to reassociate. 
When in IBSS mode this might mean we have + * not received any beacons from any local stations. Note that + * every station in an IBSS schedules to send beacons at the + * Target Beacon Transmission Time (TBTT) with a random backoff. + * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty. + * @AR5K_INT_TIM: Beacon with local station's TIM bit set + * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received + * @AR5K_INT_DTIM_SYNC: DTIM sync lost + * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to + * our GPIO pins. + * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got noting + * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got + * nothing or an incomplete CAB frame sequence. + * @AR5K_INT_QCBRORN: A queue got it's CBR counter expired + * @AR5K_INT_QCBRURN: A queue got triggered wile empty + * @AR5K_INT_QTRIG: A queue got triggered + * + * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA + * errors. Indicates we need to reset the card. * @AR5K_INT_GLOBAL: Used to clear and set the IER - * @AR5K_INT_NOCARD: signals the card has been removed - * @AR5K_INT_COMMON: common interrupts shared among MACs with the same - * bit value + * @AR5K_INT_NOCARD: Signals the card has been removed + * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same + * bit value * * These are mapped to take advantage of some common bits * between the MACs, to be able to set intr properties @@ -847,15 +1030,15 @@ enum ath5k_int { AR5K_INT_GPIO = 0x01000000, AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */ AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */ - AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */ - AR5K_INT_QCBRORN = 0x10000000, /* Non common */ - AR5K_INT_QCBRURN = 0x20000000, /* Non common */ - AR5K_INT_QTRIG = 0x40000000, /* Non common */ + AR5K_INT_QCBRORN = 0x08000000, /* Non common */ + AR5K_INT_QCBRURN = 0x10000000, /* Non common */ + AR5K_INT_QTRIG = 0x20000000, /* Non common */ AR5K_INT_GLOBAL = 0x80000000, AR5K_INT_TX_ALL = AR5K_INT_TXOK | AR5K_INT_TXDESC | AR5K_INT_TXERR + | AR5K_INT_TXNOFRM | AR5K_INT_TXEOL | AR5K_INT_TXURN, @@ -891,15 +1074,32 @@ enum ath5k_int { AR5K_INT_NOCARD = 0xffffffff }; -/* mask which calibration is active at the moment */ +/** + * enum ath5k_calibration_mask - Mask which calibration is active at the moment + * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT) + * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q) + * @AR5K_CALIBRATION_NF: Noise Floor calibration + * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity + */ enum ath5k_calibration_mask { AR5K_CALIBRATION_FULL = 0x01, AR5K_CALIBRATION_SHORT = 0x02, - AR5K_CALIBRATION_ANI = 0x04, + AR5K_CALIBRATION_NF = 0x04, + AR5K_CALIBRATION_ANI = 0x08, }; -/* - * Power management +/** + * enum ath5k_power_mode - Power management modes + * @AR5K_PM_UNDEFINED: Undefined + * @AR5K_PM_AUTO: Allow card to sleep if possible + * @AR5K_PM_AWAKE: Force card to wake up + * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS) + * @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration + * + * Currently only PM_AWAKE is used, FULL_SLEEP and NETWORK_SLEEP/AUTO + * are also known to have problems on some cards. This is not a big + * problem though because we can have almost the same effect as + * FULL_SLEEP by putting card on warm reset (it's almost powered down). 
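The grouped AR5K_INT_RX_ALL / AR5K_INT_TX_ALL masks above collect every cause for one direction, so a status word can be tested with a single AND. A loose, hedged sketch; example_handle_status and the example_schedule_* stubs are invented, and the real interrupt handler below tests finer-grained combinations:

static inline void example_schedule_rx(struct ath5k_hw *ah) { }
static inline void example_schedule_tx(struct ath5k_hw *ah) { }

static void example_handle_status(struct ath5k_hw *ah, enum ath5k_int status)
{
	if (status & AR5K_INT_RX_ALL)
		example_schedule_rx(ah);
	if (status & AR5K_INT_TX_ALL)
		example_schedule_tx(ah);
}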
*/ enum ath5k_power_mode { AR5K_PM_UNDEFINED = 0, @@ -957,6 +1157,8 @@ struct ath5k_capabilities { } cap_queues; bool cap_has_phyerr_counters; + bool cap_has_mrr_support; + bool cap_needs_2GHz_ovr; }; /* size of noise floor history (keep it a power of two) */ @@ -1072,13 +1274,11 @@ struct ath5k_hw { dma_addr_t desc_daddr; /* DMA (physical) address */ size_t desc_len; /* size of TX/RX descriptors */ - DECLARE_BITMAP(status, 6); + DECLARE_BITMAP(status, 4); #define ATH_STAT_INVALID 0 /* disable hardware accesses */ -#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ -#define ATH_STAT_PROMISC 2 -#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ -#define ATH_STAT_STARTED 4 /* opened & irqs enabled */ -#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */ +#define ATH_STAT_PROMISC 1 +#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */ +#define ATH_STAT_STARTED 3 /* opened & irqs enabled */ unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ struct ieee80211_channel *curchan; /* current h/w channel */ @@ -1097,6 +1297,7 @@ struct ath5k_hw { led_on; /* pin setting for LED on */ struct work_struct reset_work; /* deferred chip reset */ + struct work_struct calib_work; /* deferred phy calibration */ struct list_head rxbuf; /* receive buffer */ spinlock_t rxbuflock; @@ -1113,8 +1314,6 @@ struct ath5k_hw { struct ath5k_rfkill rf_kill; - struct tasklet_struct calib; /* calibration tasklet */ - spinlock_t block; /* protects beacon */ struct tasklet_struct beacontq; /* beacon intr tasklet */ struct list_head bcbuf; /* beacon buffer */ @@ -1144,7 +1343,7 @@ struct ath5k_hw { enum ath5k_int ah_imr; struct ieee80211_channel *ah_current_channel; - bool ah_calibration; + bool ah_iq_cal_needed; bool ah_single_chip; enum ath5k_version ah_version; @@ -1187,7 +1386,13 @@ struct ath5k_hw { u32 ah_txq_imr_cbrurn; u32 ah_txq_imr_qtrig; u32 ah_txq_imr_nofrm; - u32 ah_txq_isr; + + u32 ah_txq_isr_txok_all; + u32 ah_txq_isr_txurn; + u32 ah_txq_isr_qcborn; + u32 ah_txq_isr_qcburn; + u32 ah_txq_isr_qtrig; + u32 *ah_rf_banks; size_t ah_rf_banks_size; size_t ah_rf_regs_count; @@ -1228,8 +1433,8 @@ struct ath5k_hw { /* Calibration timestamp */ unsigned long ah_cal_next_full; + unsigned long ah_cal_next_short; unsigned long ah_cal_next_ani; - unsigned long ah_cal_next_nf; /* Calibration mask */ u8 ah_cal_mask; @@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); void ath5k_hw_reset_tsf(struct ath5k_hw *ah); -void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); +void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, + u32 interval); bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval); /* Init function */ -void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode, - u8 mode); +void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode); /* Queue Control Unit, DFS Control Unit Functions */ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 91627dd2c26..d7114c75fe9 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -27,8 +27,7 @@ #include "debug.h" /** - * ath5k_hw_post - Power On Self Test helper function - * + * ath5k_hw_post() - Power On Self Test helper function * @ah: The &struct ath5k_hw */ static int ath5k_hw_post(struct 
ath5k_hw *ah) @@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah) } /** - * ath5k_hw_init - Check if hw is supported and init the needed structs - * + * ath5k_hw_init() - Check if hw is supported and init the needed structs * @ah: The &struct ath5k_hw associated with the device * * Check if the device is supported, perform a POST and initialize the needed @@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) /* Reset SERDES to load new settings */ ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET); - mdelay(1); + usleep_range(1000, 1500); } /* Get misc capabilities */ @@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah) goto err; } - if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) { - __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode); - __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode); - } - /* Crypto settings */ common->keymax = (ah->ah_version == AR5K_AR5210 ? AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211); @@ -349,8 +342,7 @@ err: } /** - * ath5k_hw_deinit - Free the ath5k_hw struct - * + * ath5k_hw_deinit() - Free the &struct ath5k_hw * @ah: The &struct ath5k_hw */ void ath5k_hw_deinit(struct ath5k_hw *ah) diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index b346d049200..d366dadcf86 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -68,18 +68,23 @@ #define CREATE_TRACE_POINTS #include "trace.h" -int ath5k_modparam_nohwcrypt; +bool ath5k_modparam_nohwcrypt; module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); -static int modparam_all_channels; +static bool modparam_all_channels; module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); -static int modparam_fastchanswitch; +static bool modparam_fastchanswitch; module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO); MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios."); +static int ath5k_modparam_no_hw_rfkill_switch; +module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch, + bool, S_IRUGO); +MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state"); + /* Module info */ MODULE_AUTHOR("Jiri Slaby"); @@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = { { .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, .flags = 0 }, - /* XR missing */ }; static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) @@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, if (ret) goto err_unmap; - memset(mrr_rate, 0, sizeof(mrr_rate)); - memset(mrr_tries, 0, sizeof(mrr_tries)); - for (i = 0; i < 3; i++) { - rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); - if (!rate) - break; + /* Set up MRR descriptor */ + if (ah->ah_capabilities.cap_has_mrr_support) { + memset(mrr_rate, 0, sizeof(mrr_rate)); + memset(mrr_tries, 0, sizeof(mrr_tries)); + for (i = 0; i < 3; i++) { + rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); + if (!rate) + break; - mrr_rate[i] = rate->hw_value; - mrr_tries[i] = info->control.rates[i + 1].count; - } + mrr_rate[i] = rate->hw_value; + mrr_tries[i] = info->control.rates[i + 1].count; + } - ath5k_hw_setup_mrr_tx_desc(ah, ds, - mrr_rate[0], mrr_tries[0], - mrr_rate[1], mrr_tries[1], - mrr_rate[2], mrr_tries[2]); + ath5k_hw_setup_mrr_tx_desc(ah, ds, + mrr_rate[0], mrr_tries[0], + 
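The attach.c hunk above swaps a busy-wait mdelay(1) for usleep_range(), which is preferred in sleepable context because it yields the CPU and gives the timer code a coalescing window. A trivial hedged sketch; example_settle_after_reset is invented:

#include <linux/delay.h>

static void example_settle_after_reset(void)
{
	usleep_range(1000, 1500);	/* min 1000 us, max 1500 us */
}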
mrr_rate[1], mrr_tries[1], + mrr_rate[2], mrr_tries[2]); + } ds->ds_link = 0; ds->ds_data = bf->skbaddr; @@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data) struct ath5k_hw *ah = (void *)data; for (i = 0; i < AR5K_NUM_TX_QUEUES; i++) - if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i))) + if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i))) ath5k_tx_processq(ah, &ah->txqs[i]); ah->tx_pending = false; @@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) ah->nexttbtt = nexttbtt; intval |= AR5K_BEACON_ENA; - ath5k_hw_init_beacon(ah, nexttbtt, intval); + ath5k_hw_init_beacon_timers(ah, nexttbtt, intval); /* * debugging output last in order to preserve the time critical aspect @@ -2112,16 +2119,29 @@ static void ath5k_intr_calibration_poll(struct ath5k_hw *ah) { if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) && - !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) { - /* run ANI only when full calibration is not active */ + !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && + !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { + + /* Run ANI only when calibration is not active */ + ah->ah_cal_next_ani = jiffies + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); tasklet_schedule(&ah->ani_tasklet); - } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { - ah->ah_cal_next_full = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); - tasklet_schedule(&ah->calib); + } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) && + !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && + !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { + + /* Run calibration only when another calibration + * is not running. + * + * Note: This is for both full/short calibration, + * if it's time for a full one, ath5k_calibrate_work will deal + * with it. */ + + ah->ah_cal_next_short = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); + ieee80211_queue_work(ah->hw, &ah->calib_work); } /* we could use SWI to generate enough interrupts to meet our * calibration interval requirements, if necessary: @@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id) enum ath5k_int status; unsigned int counter = 1000; + + /* + * If hw is not ready (or detached) and we get an + * interrupt, or if we have no interrupts pending + * (that means it's not for us) skip it. + * + * NOTE: Group 0/1 PCI interface registers are not + * supported on WiSOCs, so we can't check for pending + * interrupts (ISR belongs to another register group + * so we are ok). + */ if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) || - ((ath5k_get_bus_type(ah) != ATH_AHB) && - !ath5k_hw_is_intr_pending(ah)))) + ((ath5k_get_bus_type(ah) != ATH_AHB) && + !ath5k_hw_is_intr_pending(ah)))) return IRQ_NONE; + /** Main loop **/ do { - ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ + ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ + ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", status, ah->imask); + + /* + * Fatal hw error -> Log and reset + * + * Fatal errors are unrecoverable so we have to + * reset the card. These errors include bus and + * dma errors. + */ if (unlikely(status & AR5K_INT_FATAL)) { - /* - * Fatal errors are unrecoverable. - * Typically these are caused by DMA errors. - */ + ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "fatal int, resetting\n"); ieee80211_queue_work(ah->hw, &ah->reset_work); + + /* + * RX Overrun -> Count and reset if needed + * + * Receive buffers are full. Either the bus is busy or + * the CPU is not fast enough to process all received + * frames. 
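In the calibration poll above, ANI and the short calibration are only scheduled while no calibration is marked active in ah_cal_mask. A hedged one-liner expressing that gate; example_calibration_idle is an invented helper:

static inline bool example_calibration_idle(u8 cal_mask)
{
	return !(cal_mask & (AR5K_CALIBRATION_FULL | AR5K_CALIBRATION_SHORT));
}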
+ */ } else if (unlikely(status & AR5K_INT_RXORN)) { + /* - * Receive buffers are full. Either the bus is busy or - * the CPU is not fast enough to process all received - * frames. * Older chipsets need a reset to come out of this * condition, but we treat it as RX for newer chips. - * We don't know exactly which versions need a reset - + * We don't know exactly which versions need a reset * this guess is copied from the HAL. */ ah->stats.rxorn_intr++; + if (ah->ah_mac_srev < AR5K_SREV_AR5212) { ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "rx overrun, resetting\n"); ieee80211_queue_work(ah->hw, &ah->reset_work); } else ath5k_schedule_rx(ah); + } else { + + /* Software Beacon Alert -> Schedule beacon tasklet */ if (status & AR5K_INT_SWBA) tasklet_hi_schedule(&ah->beacontq); - if (status & AR5K_INT_RXEOL) { - /* - * NB: the hardware should re-read the link when - * RXE bit is written, but it doesn't work at - * least on older hardware revs. - */ + /* + * No more RX descriptors -> Just count + * + * NB: the hardware should re-read the link when + * RXE bit is written, but it doesn't work at + * least on older hardware revs. + */ + if (status & AR5K_INT_RXEOL) ah->stats.rxeol_intr++; - } - if (status & AR5K_INT_TXURN) { - /* bump tx trigger level */ + + + /* TX Underrun -> Bump tx trigger level */ + if (status & AR5K_INT_TXURN) ath5k_hw_update_tx_triglevel(ah, true); - } + + /* RX -> Schedule rx tasklet */ if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) ath5k_schedule_rx(ah); - if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC - | AR5K_INT_TXERR | AR5K_INT_TXEOL)) + + /* TX -> Schedule tx tasklet */ + if (status & (AR5K_INT_TXOK + | AR5K_INT_TXDESC + | AR5K_INT_TXERR + | AR5K_INT_TXEOL)) ath5k_schedule_tx(ah); - if (status & AR5K_INT_BMISS) { - /* TODO */ - } + + /* Missed beacon -> TODO + if (status & AR5K_INT_BMISS) + */ + + /* MIB event -> Update counters and notify ANI */ if (status & AR5K_INT_MIB) { ah->stats.mib_intr++; ath5k_hw_update_mib_counters(ah); ath5k_ani_mib_intr(ah); } + + /* GPIO -> Notify RFKill layer */ if (status & AR5K_INT_GPIO) tasklet_schedule(&ah->rf_kill.toggleq); @@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id) } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); + /* + * Until we handle rx/tx interrupts mask them on IMR + * + * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets + * and unset after we 've handled the interrupts. + */ if (ah->rx_pending || ah->tx_pending) ath5k_set_current_imask(ah); if (unlikely(!counter)) ATH5K_WARN(ah, "too many interrupts, giving up for now\n"); + /* Fire up calibration poll */ ath5k_intr_calibration_poll(ah); return IRQ_HANDLED; @@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id) * for temperature/environment changes. */ static void -ath5k_tasklet_calibrate(unsigned long data) +ath5k_calibrate_work(struct work_struct *work) { - struct ath5k_hw *ah = (void *)data; + struct ath5k_hw *ah = container_of(work, struct ath5k_hw, + calib_work); + + /* Should we run a full calibration ? */ + if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { + + ah->ah_cal_next_full = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); + ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; + + ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, + "running full calibration\n"); + + if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { + /* + * Rfgain is out of bounds, reset the chip + * to load new gain values. 
+ */ + ATH5K_DBG(ah, ATH5K_DEBUG_RESET, + "got new rfgain, resetting\n"); + ieee80211_queue_work(ah->hw, &ah->reset_work); + } + + /* TODO: On full calibration we should stop TX here, + * so that it doesn't interfere (mostly due to gain_f + * calibration that messes with tx packets -see phy.c). + * + * NOTE: Stopping the queues from above is not enough + * to stop TX but saves us from disconecting (at least + * we don't lose packets). */ + ieee80211_stop_queues(ah->hw); + } else + ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT; - /* Only full calibration for now */ - ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", ieee80211_frequency_to_channel(ah->curchan->center_freq), ah->curchan->hw_value); - if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { - /* - * Rfgain is out of bounds, reset the chip - * to load new gain values. - */ - ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n"); - ieee80211_queue_work(ah->hw, &ah->reset_work); - } if (ath5k_hw_phy_calibrate(ah, ah->curchan)) ATH5K_ERR(ah, "calibration of channel %u failed\n", ieee80211_frequency_to_channel( ah->curchan->center_freq)); - /* Noise floor calibration interrupts rx/tx path while I/Q calibration - * doesn't. - * TODO: We should stop TX here, so that it doesn't interfere. - * Note that stopping the queues is not enough to stop TX! */ - if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { - ah->ah_cal_next_nf = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF); - ath5k_hw_update_noise_floor(ah); - } - - ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; + /* Clear calibration flags */ + if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) { + ieee80211_wake_queues(ah->hw); + ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; + } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT) + ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT; } @@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) if (ret) goto err_irq; - /* set up multi-rate retry capabilities */ - if (ah->ah_version == AR5K_AR5212) { + /* Set up multi-rate retry capabilities */ + if (ah->ah_capabilities.cap_has_mrr_support) { hw->max_rates = 4; hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT, AR5K_INIT_RETRY_LONG); @@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw) * and then setup of the interrupt mask. 
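The calibration rework above tracks which kind of calibration is in flight through bits in ah->ah_cal_mask and picks a full or a short run by comparing a deadline. A minimal standalone C sketch of that bookkeeping, with invented names and a plain counter standing in for jiffies (not the driver's actual code):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's calibration flags/deadlines. */
#define CAL_FULL  0x01
#define CAL_SHORT 0x02

struct cal_state {
	unsigned long now;        /* stand-in for jiffies */
	unsigned long next_full;  /* next full-calibration deadline */
	unsigned int  mask;       /* which calibration is currently running */
};

/* Decide what kind of calibration to run and mark it active. */
static const char *cal_begin(struct cal_state *s, unsigned long full_interval)
{
	if (s->now >= s->next_full) {
		s->next_full = s->now + full_interval;
		s->mask |= CAL_FULL;
		return "full";
	}
	s->mask |= CAL_SHORT;
	return "short";
}

/* Clear whichever flag was set once the work item finishes. */
static void cal_end(struct cal_state *s)
{
	if (s->mask & CAL_FULL)
		s->mask &= ~CAL_FULL;
	else if (s->mask & CAL_SHORT)
		s->mask &= ~CAL_SHORT;
}

int main(void)
{
	struct cal_state s = { .now = 100, .next_full = 90, .mask = 0 };

	printf("run %s calibration\n", cal_begin(&s, 60)); /* full: deadline passed */
	cal_end(&s);
	s.now = 120;
	printf("run %s calibration\n", cal_begin(&s, 60)); /* short: next_full is now 160 */
	cal_end(&s);
	return 0;
}

The same flag that is set on entry is the one the ISR poll checks before scheduling more work, which is why clearing it at the end matters.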
*/ ah->curchan = ah->hw->conf.channel; - ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | - AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | - AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; + ah->imask = AR5K_INT_RXOK + | AR5K_INT_RXERR + | AR5K_INT_RXEOL + | AR5K_INT_RXORN + | AR5K_INT_TXDESC + | AR5K_INT_TXEOL + | AR5K_INT_FATAL + | AR5K_INT_GLOBAL + | AR5K_INT_MIB; ret = ath5k_reset(ah, NULL, false); if (ret) goto done; - ath5k_rfkill_hw_start(ah); + if (!ath5k_modparam_no_hw_rfkill_switch) + ath5k_rfkill_hw_start(ah); /* * Reset the key cache since some parts do not reset the @@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah) ah->tx_pending = false; tasklet_kill(&ah->rxtq); tasklet_kill(&ah->txtq); - tasklet_kill(&ah->calib); tasklet_kill(&ah->beacontq); tasklet_kill(&ah->ani_tasklet); } @@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&ah->tx_complete_work); - ath5k_rfkill_hw_stop(ah); + if (!ath5k_modparam_no_hw_rfkill_switch) + ath5k_rfkill_hw_stop(ah); } /* @@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, ath5k_ani_init(ah, ani_mode); - ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100); - ah->ah_cal_next_ani = jiffies; - ah->ah_cal_next_nf = jiffies; + /* + * Set calibration intervals + * + * Note: We don't need to run calibration imediately + * since some initial calibration is done on reset + * even for fast channel switching. Also on scanning + * this will get set again and again and it won't get + * executed unless we connect somewhere and spend some + * time on the channel (that's what calibration needs + * anyway to be accurate). + */ + ah->ah_cal_next_full = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); + ah->ah_cal_next_ani = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); + ah->ah_cal_next_short = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); + ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); /* clear survey data and cycle counters */ @@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw) /* - * Check if the MAC has multi-rate retry support. - * We do this by trying to setup a fake extended - * descriptor. MACs that don't have support will - * return false w/o doing anything. MACs that do - * support it will return true w/o doing anything. - */ - ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); - - if (ret < 0) - goto err; - if (ret > 0) - __set_bit(ATH_STAT_MRRETRY, ah->status); - - /* * Collect the channel list. 
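The reset path above re-arms the ANI/short/full calibration deadlines as "jiffies + msecs_to_jiffies(interval)", and the interrupt poll earlier in the patch compares them with the time_is_before_eq_jiffies() helpers. Those helpers exist because the tick counter wraps; a compact userspace sketch of the same wrap-safe comparison (hypothetical names, a u32 tick counter standing in for jiffies):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has the deadline passed?" check: the signed-difference trick
 * behind the kernel's time_after()/time_is_before_eq_jiffies() family. */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
	return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* close to wraparound */
	uint32_t next_ani = now + 100;		/* wraps to a small value */

	printf("%d\n", deadline_passed(now, next_ani));		/* 0: not due yet */
	printf("%d\n", deadline_passed(now + 150, next_ani));	/* 1: due, despite the wrap */
	return 0;
}

A plain "now >= deadline" comparison would misfire as soon as the deadline lands past the wrap point.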
The 802.11 layer * is responsible for filtering this list based * on settings like the phy mode and regulatory @@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw) tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah); tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah); - tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah); tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah); tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah); INIT_WORK(&ah->reset_work, ath5k_reset_work); + INIT_WORK(&ah->calib_work, ath5k_calibrate_work); INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work); ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac); diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c index 810fba96702..994169ad39c 100644 --- a/drivers/net/wireless/ath/ath5k/caps.c +++ b/drivers/net/wireless/ath/ath5k/caps.c @@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) caps->cap_range.range_2ghz_min = 2412; caps->cap_range.range_2ghz_max = 2732; - if (AR5K_EEPROM_HDR_11B(ee_header)) - __set_bit(AR5K_MODE_11B, caps->cap_mode); - - if (AR5K_EEPROM_HDR_11G(ee_header) && - ah->ah_version != AR5K_AR5211) - __set_bit(AR5K_MODE_11G, caps->cap_mode); + /* Override 2GHz modes on SoCs that need it + * NOTE: cap_needs_2GHz_ovr gets set from + * ath_ahb_probe */ + if (!caps->cap_needs_2GHz_ovr) { + if (AR5K_EEPROM_HDR_11B(ee_header)) + __set_bit(AR5K_MODE_11B, + caps->cap_mode); + + if (AR5K_EEPROM_HDR_11G(ee_header) && + ah->ah_version != AR5K_AR5211) + __set_bit(AR5K_MODE_11G, + caps->cap_mode); + } } } @@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) else caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; - /* newer hardware has PHY error counters */ + /* Newer hardware has PHY error counters */ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A) caps->cap_has_phyerr_counters = true; else caps->cap_has_phyerr_counters = false; + /* MACs since AR5212 have MRR support */ + if (ah->ah_version == AR5K_AR5212) + caps->cap_has_mrr_support = true; + else + caps->cap_has_mrr_support = false; + return 0; } diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index 7e88dda8222..f8bfa3ac2af 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c @@ -26,20 +26,61 @@ #include "debug.h" +/** + * DOC: Hardware descriptor functions + * + * Here we handle the processing of the low-level hw descriptors + * that hw reads and writes via DMA for each TX and RX attempt (that means + * we can also have descriptors for failed TX/RX tries). We have two kind of + * descriptors for RX and TX, control descriptors tell the hw how to send or + * receive a packet where to read/write it from/to etc and status descriptors + * that contain information about how the packet was sent or received (errors + * included). + * + * Descriptor format is not exactly the same for each MAC chip version so we + * have function pointers on &struct ath5k_hw we initialize at runtime based on + * the chip used. 
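The DOC comment above (and ath5k_hw_init_desc_functions() further down) describes the usual per-chip dispatch: descriptor layouts differ between MAC generations, so function pointers are filled in once at attach time and everything else calls through them. A minimal sketch of that pattern in plain C (struct and names illustrative, not the driver's):

#include <stdio.h>

enum mac_version { MAC_OLD, MAC_NEW };

struct hw {
	enum mac_version version;
	/* Filled in once at init; callers never check the version again. */
	int (*setup_tx_desc)(struct hw *hw, unsigned int len);
};

static int setup_tx_desc_2word(struct hw *hw, unsigned int len)
{
	printf("2-word descriptor, len %u\n", len);
	return 0;
}

static int setup_tx_desc_4word(struct hw *hw, unsigned int len)
{
	printf("4-word descriptor, len %u\n", len);
	return 0;
}

static void init_desc_functions(struct hw *hw)
{
	hw->setup_tx_desc = (hw->version == MAC_NEW) ?
			setup_tx_desc_4word : setup_tx_desc_2word;
}

int main(void)
{
	struct hw hw = { .version = MAC_NEW };

	init_desc_functions(&hw);
	hw.setup_tx_desc(&hw, 1500);	/* dispatches without a version check */
	return 0;
}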
+ */ + + /************************\ * TX Control descriptors * \************************/ -/* - * Initialize the 2-word tx control descriptor on 5210/5211 +/** + * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @pkt_len: Frame length in bytes + * @hdr_len: Header length in bytes (only used on AR5210) + * @padsize: Any padding we've added to the frame length + * @type: One of enum ath5k_pkt_type + * @tx_power: Tx power in 0.5dB steps + * @tx_rate0: HW idx for transmission rate + * @tx_tries0: Max number of retransmissions + * @key_index: Index on key table to use for encryption + * @antenna_mode: Which antenna to use (0 for auto) + * @flags: One of AR5K_TXDESC_* flags (desc.h) + * @rtscts_rate: HW idx for RTS/CTS transmission rate + * @rtscts_duration: What to put on duration field on the header of RTS/CTS + * + * Internal function to initialize a 2-Word TX control descriptor + * found on AR5210 and AR5211 MACs chips. + * + * Returns 0 on success or -EINVAL on false input */ static int -ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, - unsigned int pkt_len, unsigned int hdr_len, int padsize, - enum ath5k_pkt_type type, - unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, - unsigned int key_index, unsigned int antenna_mode, unsigned int flags, - unsigned int rtscts_rate, unsigned int rtscts_duration) +ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, + struct ath5k_desc *desc, + unsigned int pkt_len, unsigned int hdr_len, + int padsize, + enum ath5k_pkt_type type, + unsigned int tx_power, + unsigned int tx_rate0, unsigned int tx_tries0, + unsigned int key_index, + unsigned int antenna_mode, + unsigned int flags, + unsigned int rtscts_rate, unsigned int rtscts_duration) { u32 frame_type; struct ath5k_hw_2w_tx_ctl *tx_ctl; @@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, return 0; } -/* - * Initialize the 4-word tx control descriptor on 5212 +/** + * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @pkt_len: Frame length in bytes + * @hdr_len: Header length in bytes (only used on AR5210) + * @padsize: Any padding we've added to the frame length + * @type: One of enum ath5k_pkt_type + * @tx_power: Tx power in 0.5dB steps + * @tx_rate0: HW idx for transmission rate + * @tx_tries0: Max number of retransmissions + * @key_index: Index on key table to use for encryption + * @antenna_mode: Which antenna to use (0 for auto) + * @flags: One of AR5K_TXDESC_* flags (desc.h) + * @rtscts_rate: HW idx for RTS/CTS transmission rate + * @rtscts_duration: What to put on duration field on the header of RTS/CTS + * + * Internal function to initialize a 4-Word TX control descriptor + * found on AR5212 and later MACs chips. 
+ * + * Returns 0 on success or -EINVAL on false input */ -static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, - struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, - int padsize, - enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, - unsigned int tx_tries0, unsigned int key_index, - unsigned int antenna_mode, unsigned int flags, - unsigned int rtscts_rate, - unsigned int rtscts_duration) +static int +ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, + struct ath5k_desc *desc, + unsigned int pkt_len, unsigned int hdr_len, + int padsize, + enum ath5k_pkt_type type, + unsigned int tx_power, + unsigned int tx_rate0, unsigned int tx_tries0, + unsigned int key_index, + unsigned int antenna_mode, + unsigned int flags, + unsigned int rtscts_rate, unsigned int rtscts_duration) { struct ath5k_hw_4w_tx_ctl *tx_ctl; unsigned int frame_len; @@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, return 0; } -/* - * Initialize a 4-word multi rate retry tx control descriptor on 5212 +/** + * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @tx_rate1: HW idx for rate used on transmission series 1 + * @tx_tries1: Max number of retransmissions for transmission series 1 + * @tx_rate2: HW idx for rate used on transmission series 2 + * @tx_tries2: Max number of retransmissions for transmission series 2 + * @tx_rate3: HW idx for rate used on transmission series 3 + * @tx_tries3: Max number of retransmissions for transmission series 3 + * + * Multi rate retry (MRR) tx control descriptors are available only on AR5212 + * MACs, they are part of the normal 4-word tx control descriptor (see above) + * but we handle them through a separate function for better abstraction. 
+ * + * Returns 0 on success or -EINVAL on invalid input */ int -ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, - unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, - u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) +ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, + struct ath5k_desc *desc, + u_int tx_rate1, u_int tx_tries1, + u_int tx_rate2, u_int tx_tries2, + u_int tx_rate3, u_int tx_tries3) { struct ath5k_hw_4w_tx_ctl *tx_ctl; @@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, * TX Status descriptors * \***********************/ -/* - * Process the tx status descriptor on 5210/5211 +/** + * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1 + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @ts: The &struct ath5k_tx_status */ -static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, struct ath5k_tx_status *ts) +static int +ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, + struct ath5k_tx_status *ts) { struct ath5k_hw_2w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; @@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, return 0; } -/* - * Process a tx status descriptor on 5212 +/** + * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212 + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @ts: The &struct ath5k_tx_status */ -static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, struct ath5k_tx_status *ts) +static int +ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, + struct ath5k_tx_status *ts) { struct ath5k_hw_4w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; @@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, * RX Descriptors * \****************/ -/* - * Initialize an rx control descriptor +/** + * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @size: RX buffer length in bytes + * @flags: One of AR5K_RXDESC_* flags */ -int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, - u32 size, unsigned int flags) +int +ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, + struct ath5k_desc *desc, + u32 size, unsigned int flags) { struct ath5k_hw_rx_ctl *rx_ctl; @@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, return 0; } -/* - * Process the rx status descriptor on 5210/5211 +/** + * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1 + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @rs: The &struct ath5k_rx_status + * + * Internal function used to process an RX status descriptor + * on AR5210/5211 MAC. + * + * Returns 0 on success or -EINPROGRESS in case we haven't received the who;e + * frame yet. 
*/ -static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, struct ath5k_rx_status *rs) +static int +ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, + struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; @@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, return 0; } -/* - * Process the rx status descriptor on 5212 +/** + * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212 + * @ah: The &struct ath5k_hw + * @desc: The &struct ath5k_desc + * @rs: The &struct ath5k_rx_status + * + * Internal function used to process an RX status descriptor + * on AR5212 and later MAC. + * + * Returns 0 on success or -EINPROGRESS in case we haven't received the who;e + * frame yet. */ -static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, - struct ath5k_rx_status *rs) +static int +ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, + struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; u32 rxstat0, rxstat1; @@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, * Attach * \********/ -/* - * Init function pointers inside ath5k_hw struct +/** + * ath5k_hw_init_desc_functions() - Init function pointers inside ah + * @ah: The &struct ath5k_hw + * + * Maps the internal descriptor functions to the function pointers on ah, used + * from above. This is used as an abstraction layer to handle the various chips + * the same way. */ -int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) +int +ath5k_hw_init_desc_functions(struct ath5k_hw *ah) { if (ah->ah_version == AR5K_AR5212) { ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h index cfd529b548f..8d6c01a49ea 100644 --- a/drivers/net/wireless/ath/ath5k/desc.h +++ b/drivers/net/wireless/ath/ath5k/desc.h @@ -20,25 +20,30 @@ * RX/TX descriptor structures */ -/* - * Common hardware RX control descriptor +/** + * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor + * @rx_control_0: RX control word 0 + * @rx_control_1: RX control word 1 */ struct ath5k_hw_rx_ctl { - u32 rx_control_0; /* RX control word 0 */ - u32 rx_control_1; /* RX control word 1 */ + u32 rx_control_0; + u32 rx_control_1; } __packed __aligned(4); /* RX control word 1 fields/flags */ #define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */ #define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */ -/* - * Common hardware RX status descriptor +/** + * struct ath5k_hw_rx_status - Common hardware RX status descriptor + * @rx_status_0: RX status word 0 + * @rx_status_1: RX status word 1 + * * 5210, 5211 and 5212 differ only in the fields and flags defined below */ struct ath5k_hw_rx_status { - u32 rx_status_0; /* RX status word 0 */ - u32 rx_status_1; /* RX status word 1 */ + u32 rx_status_0; + u32 rx_status_1; } __packed __aligned(4); /* 5210/5211 */ @@ -98,17 +103,36 @@ struct ath5k_hw_rx_status { /** * enum ath5k_phy_error_code - PHY Error codes + * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error + * @AR5K_RX_PHY_ERROR_TIMING: Timing error + * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity + * @AR5K_RX_PHY_ERROR_RATE: Illegal rate + * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length + * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate + * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service + * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive + * 
@AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+] + * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+] + * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+] + * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+] + * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+] + * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+] + * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+] + * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+] + * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+] + * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+] + * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+] + * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+] */ enum ath5k_phy_error_code { - AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */ - AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */ - AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */ - AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */ - AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */ - AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */ - AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */ - AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */ - /* these are specific to the 5212 */ + AR5K_RX_PHY_ERROR_UNDERRUN = 0, + AR5K_RX_PHY_ERROR_TIMING = 1, + AR5K_RX_PHY_ERROR_PARITY = 2, + AR5K_RX_PHY_ERROR_RATE = 3, + AR5K_RX_PHY_ERROR_LENGTH = 4, + AR5K_RX_PHY_ERROR_RADAR = 5, + AR5K_RX_PHY_ERROR_SERVICE = 6, + AR5K_RX_PHY_ERROR_TOR = 7, AR5K_RX_PHY_ERROR_OFDM_TIMING = 17, AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18, AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19, @@ -123,12 +147,14 @@ enum ath5k_phy_error_code { AR5K_RX_PHY_ERROR_CCK_RESTART = 31, }; -/* - * 5210/5211 hardware 2-word TX control descriptor +/** + * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor + * @tx_control_0: TX control word 0 + * @tx_control_1: TX control word 1 */ struct ath5k_hw_2w_tx_ctl { - u32 tx_control_0; /* TX control word 0 */ - u32 tx_control_1; /* TX control word 1 */ + u32 tx_control_0; + u32 tx_control_1; } __packed __aligned(4); /* TX control word 0 fields/flags */ @@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl { #define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4 #define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4 -/* - * 5212 hardware 4-word TX control descriptor +/** + * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor + * @tx_control_0: TX control word 0 + * @tx_control_1: TX control word 1 + * @tx_control_2: TX control word 2 + * @tx_control_3: TX control word 3 */ struct ath5k_hw_4w_tx_ctl { - u32 tx_control_0; /* TX control word 0 */ - u32 tx_control_1; /* TX control word 1 */ - u32 tx_control_2; /* TX control word 2 */ - u32 tx_control_3; /* TX control word 3 */ + u32 tx_control_0; + u32 tx_control_1; + u32 tx_control_2; + u32 tx_control_3; } __packed __aligned(4); /* TX control word 0 fields/flags */ @@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl { #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */ #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20 -/* - * Common TX status descriptor +/** + * struct ath5k_hw_tx_status - Common TX status descriptor + * @tx_status_0: TX status word 0 + * @tx_status_1: TX status word 1 */ struct ath5k_hw_tx_status { - u32 tx_status_0; /* TX status word 0 */ - u32 tx_status_1; /* TX status word 1 */ + u32 tx_status_0; + u32 tx_status_1; } __packed __aligned(4); /* TX status word 0 fields/flags */ @@ -276,37 +308,47 @@ 
struct ath5k_hw_tx_status { #define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */ #define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */ -/* - * 5210/5211 hardware TX descriptor +/** + * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor + * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl + * @tx_stat: The &struct ath5k_hw_tx_status */ struct ath5k_hw_5210_tx_desc { struct ath5k_hw_2w_tx_ctl tx_ctl; struct ath5k_hw_tx_status tx_stat; } __packed __aligned(4); -/* - * 5212 hardware TX descriptor +/** + * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor + * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl + * @tx_stat: The &struct ath5k_hw_tx_status */ struct ath5k_hw_5212_tx_desc { struct ath5k_hw_4w_tx_ctl tx_ctl; struct ath5k_hw_tx_status tx_stat; } __packed __aligned(4); -/* - * Common hardware RX descriptor +/** + * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor + * @rx_ctl: The &struct ath5k_hw_rx_ctl + * @rx_stat: The &struct ath5k_hw_rx_status */ struct ath5k_hw_all_rx_desc { struct ath5k_hw_rx_ctl rx_ctl; struct ath5k_hw_rx_status rx_stat; } __packed __aligned(4); -/* - * Atheros hardware DMA descriptor +/** + * struct ath5k_desc - Atheros hardware DMA descriptor + * @ds_link: Physical address of the next descriptor + * @ds_data: Physical address of data buffer (skb) + * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc + * * This is read and written to by the hardware */ struct ath5k_desc { - u32 ds_link; /* physical address of the next descriptor */ - u32 ds_data; /* physical address of data buffer (skb) */ + u32 ds_link; + u32 ds_data; union { struct ath5k_hw_5210_tx_desc ds_tx5210; diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index 2481f9c7f4b..5cc9aa81469 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -20,16 +20,13 @@ * DMA and interrupt masking functions * \*************************************/ -/* - * dma.c - DMA and interrupt masking functions +/** + * DOC: DMA and interrupt masking functions * * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and * handle queue setup for 5210 chipset (rest are handled on qcu.c). * Also we setup interrupt mask register (IMR) and read the various interrupt * status registers (ISR). - * - * TODO: Handle SISR on 5211+ and introduce a function to return the queue - * number that resulted the interrupt. 
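The descriptor structures documented above are fixed-size, 4-byte-aligned blocks that the hardware walks via ds_link, with the tx and rx views overlaid on the same memory. A compact illustration of that shape in standalone C, with shortened names and a compile-time size check (layout invented for the example, not the driver's exact one):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct tx_view { uint32_t ctl0, ctl1, stat0, stat1; } __attribute__((packed, aligned(4)));
struct rx_view { uint32_t ctl0, ctl1, stat0, stat1; } __attribute__((packed, aligned(4)));

/* One DMA descriptor: link + buffer address + tx/rx specific words. */
struct desc {
	uint32_t link;	/* physical address of the next descriptor */
	uint32_t data;	/* physical address of the frame buffer */
	union {
		struct tx_view tx;
		struct rx_view rx;
	} u;
} __attribute__((packed, aligned(4)));

static_assert(sizeof(struct desc) == 24, "hw expects a fixed layout");

int main(void)
{
	struct desc d = { .link = 0, .data = 0x1000 };

	d.u.rx.ctl1 = 2400 & 0xfff;	/* e.g. a buffer-length field */
	printf("desc size %zu, rx ctl1 0x%x\n", sizeof(d), (unsigned)d.u.rx.ctl1);
	return 0;
}

Keeping the layout packed and size-checked is what lets the hardware and the driver agree on where each control/status word sits.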
*/ #include "ath5k.h" @@ -42,22 +39,22 @@ \*********/ /** - * ath5k_hw_start_rx_dma - Start DMA receive - * + * ath5k_hw_start_rx_dma() - Start DMA receive * @ah: The &struct ath5k_hw */ -void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) +void +ath5k_hw_start_rx_dma(struct ath5k_hw *ah) { ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); ath5k_hw_reg_read(ah, AR5K_CR); } /** - * ath5k_hw_stop_rx_dma - Stop DMA receive - * + * ath5k_hw_stop_rx_dma() - Stop DMA receive * @ah: The &struct ath5k_hw */ -static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) +static int +ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) { unsigned int i; @@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) } /** - * ath5k_hw_get_rxdp - Get RX Descriptor's address - * + * ath5k_hw_get_rxdp() - Get RX Descriptor's address * @ah: The &struct ath5k_hw */ -u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) +u32 +ath5k_hw_get_rxdp(struct ath5k_hw *ah) { return ath5k_hw_reg_read(ah, AR5K_RXDP); } /** - * ath5k_hw_set_rxdp - Set RX Descriptor's address - * + * ath5k_hw_set_rxdp() - Set RX Descriptor's address * @ah: The &struct ath5k_hw * @phys_addr: RX descriptor address * * Returns -EIO if rx is active */ -int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) +int +ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) { if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) { ATH5K_DBG(ah, ATH5K_DEBUG_DMA, @@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) \**********/ /** - * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue - * + * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue * @ah: The &struct ath5k_hw * @queue: The hw queue number * @@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) * NOTE: Must be called after setting up tx control descriptor for that * queue (see below). */ -int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) +int +ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) { u32 tx_queue; @@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue - * + * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue * @ah: The &struct ath5k_hw * @queue: The hw queue number * * Stop DMA transmit on a specific hw queue and drain queue so we don't * have any pending frames. Returns -EBUSY if we still have pending frames, * -EINVAL if queue number is out of range or inactive. 
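ath5k_hw_stop_rx_dma() above (and the tx variant that follows) use the standard stop sequence: clear the enable bit, then poll the status register a bounded number of times and report -EBUSY if the engine never drains. A userspace sketch of that loop, with register access faked by a variable and bit positions invented:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

static uint32_t fake_cr = 0x4;		/* pretend the RX-enable bit (bit 2) is set */

static uint32_t reg_read(void)       { return fake_cr; }
static void     reg_write(uint32_t v) { fake_cr = v; }

/* Ask the engine to stop, then wait (bounded) until it reports idle. */
static int stop_rx_dma(void)
{
	int i;

	reg_write(reg_read() & ~0x4u);	/* clear the enable bit */
	for (i = 1000; i > 0; i--) {
		if (!(reg_read() & 0x4u))
			return 0;	/* drained */
		/* a real driver would udelay()/usleep_range() between polls */
	}
	return -EBUSY;
}

int main(void)
{
	printf("stop_rx_dma: %d\n", stop_rx_dma());
	return 0;
}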
- * */ -static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) +static int +ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) { unsigned int i = 40; u32 tx_queue, pending; @@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_stop_beacon_queue - Stop beacon queue - * - * @ah The &struct ath5k_hw - * @queue The queue number + * ath5k_hw_stop_beacon_queue() - Stop beacon queue + * @ah: The &struct ath5k_hw + * @queue: The queue number * * Returns -EIO if queue didn't stop */ -int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) +int +ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) { int ret; ret = ath5k_hw_stop_tx_dma(ah, queue); @@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue - * + * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue * @ah: The &struct ath5k_hw * @queue: The hw queue number * @@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) * * XXX: Is TXDP read and clear ? */ -u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) +u32 +ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) { u16 tx_reg; @@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue - * + * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue * @ah: The &struct ath5k_hw * @queue: The hw queue number + * @phys_addr: The physical address * * Set TX descriptor's address for a specific queue. For 5210 we ignore * the queue number and we use tx queue type since we only have 2 queues @@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still * active. */ -int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) +int +ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) { u16 tx_reg; @@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) } /** - * ath5k_hw_update_tx_triglevel - Update tx trigger level - * + * ath5k_hw_update_tx_triglevel() - Update tx trigger level * @ah: The &struct ath5k_hw * @increase: Flag to force increase of trigger level * @@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) * buffer (aka FIFO threshold) that is used to indicate when PCU flushes * the buffer and transmits its data. Lowering this results sending small * frames more quickly but can lead to tx underruns, raising it a lot can - * result other problems (i think bmiss is related). Right now we start with - * the lowest possible (64Bytes) and if we get tx underrun we increase it using - * the increase flag. Returns -EIO if we have reached maximum/minimum. + * result other problems. Right now we start with the lowest possible + * (64Bytes) and if we get tx underrun we increase it using the increase + * flag. Returns -EIO if we have reached maximum/minimum. * * XXX: Link this with tx DMA size ? - * XXX: Use it to save interrupts ? - * TODO: Needs testing, i think it's related to bmiss... + * XXX2: Use it to save interrupts ? 
*/ -int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) +int +ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) { u32 trigger_level, imr; int ret = -EIO; @@ -498,21 +494,20 @@ done: \*******************/ /** - * ath5k_hw_is_intr_pending - Check if we have pending interrupts - * + * ath5k_hw_is_intr_pending() - Check if we have pending interrupts * @ah: The &struct ath5k_hw * * Check if we have pending interrupts to process. Returns 1 if we * have pending interrupts and 0 if we haven't. */ -bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) +bool +ath5k_hw_is_intr_pending(struct ath5k_hw *ah) { return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; } /** - * ath5k_hw_get_isr - Get interrupt status - * + * ath5k_hw_get_isr() - Get interrupt status * @ah: The @struct ath5k_hw * @interrupt_mask: Driver's interrupt mask used to filter out * interrupts in sw. @@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) * being mapped on some standard non hw-specific positions * (check out &ath5k_int). * - * NOTE: We use read-and-clear register, so after this function is called ISR - * is zeroed. + * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this + * function gets called are cleared on return. */ -int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) +int +ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) { - u32 data; + u32 data = 0; /* - * Read interrupt status from the Interrupt Status register - * on 5210 + * Read interrupt status from Primary Interrupt + * Register. + * + * Note: PISR/SISR Not available on 5210 */ if (ah->ah_version == AR5K_AR5210) { - data = ath5k_hw_reg_read(ah, AR5K_ISR); - if (unlikely(data == AR5K_INT_NOCARD)) { - *interrupt_mask = data; + u32 isr = 0; + isr = ath5k_hw_reg_read(ah, AR5K_ISR); + if (unlikely(isr == AR5K_INT_NOCARD)) { + *interrupt_mask = isr; return -ENODEV; } - } else { + /* - * Read interrupt status from Interrupt - * Status Register shadow copy (Read And Clear) - * - * Note: PISR/SISR Not available on 5210 + * Filter out the non-common bits from the interrupt + * status. */ - data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); - if (unlikely(data == AR5K_INT_NOCARD)) { - *interrupt_mask = data; + *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr; + + /* Hanlde INT_FATAL */ + if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT + | AR5K_ISR_DPERR))) + *interrupt_mask |= AR5K_INT_FATAL; + + /* + * XXX: BMISS interrupts may occur after association. + * I found this on 5210 code but it needs testing. If this is + * true we should disable them before assoc and re-enable them + * after a successful assoc + some jiffies. + interrupt_mask &= ~AR5K_INT_BMISS; + */ + + data = isr; + } else { + u32 pisr = 0; + u32 pisr_clear = 0; + u32 sisr0 = 0; + u32 sisr1 = 0; + u32 sisr2 = 0; + u32 sisr3 = 0; + u32 sisr4 = 0; + + /* Read PISR and SISRs... 
*/ + pisr = ath5k_hw_reg_read(ah, AR5K_PISR); + if (unlikely(pisr == AR5K_INT_NOCARD)) { + *interrupt_mask = pisr; return -ENODEV; } - } - /* - * Get abstract interrupt mask (driver-compatible) - */ - *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; + sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0); + sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1); + sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2); + sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3); + sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4); - if (ah->ah_version != AR5K_AR5210) { - u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2); + /* + * PISR holds the logical OR of interrupt bits + * from SISR registers: + * + * TXOK and TXDESC -> Logical OR of TXOK and TXDESC + * per-queue bits on SISR0 + * + * TXERR and TXEOL -> Logical OR of TXERR and TXEOL + * per-queue bits on SISR1 + * + * TXURN -> Logical OR of TXURN per-queue bits on SISR2 + * + * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2 + * + * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC + * BCN_TIMEOUT, CAB_TIMEOUT and DTIM + * (and TSFOOR ?) bits on SISR2 + * + * QCBRORN and QCBRURN -> Logical OR of QCBRORN and + * QCBRURN per-queue bits on SISR3 + * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4 + * + * If we clean these bits on PISR we 'll also clear all + * related bits from SISRs, e.g. if we write the TXOK bit on + * PISR we 'll clean all TXOK bits from SISR0 so if a new TXOK + * interrupt got fired for another queue while we were reading + * the interrupt registers and we write back the TXOK bit on + * PISR we 'll lose it. So make sure that we don't write back + * on PISR any bits that come from SISRs. Clearing them from + * SISRs will also clear PISR so no need to worry here. + */ - /*HIU = Host Interface Unit (PCI etc)*/ - if (unlikely(data & (AR5K_ISR_HIUERR))) - *interrupt_mask |= AR5K_INT_FATAL; + pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS; - /*Beacon Not Ready*/ - if (unlikely(data & (AR5K_ISR_BNR))) - *interrupt_mask |= AR5K_INT_BNR; + /* + * Write to clear them... + * Note: This means that each bit we write back + * to the registers will get cleared, leaving the + * rest unaffected. So this won't affect new interrupts + * we didn't catch while reading/processing, we 'll get + * them next time get_isr gets called. + */ + ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0); + ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1); + ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2); + ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3); + ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4); + ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR); + /* Flush previous write */ + ath5k_hw_reg_read(ah, AR5K_PISR); - if (unlikely(sisr2 & (AR5K_SISR2_SSERR | - AR5K_SISR2_DPERR | - AR5K_SISR2_MCABT))) - *interrupt_mask |= AR5K_INT_FATAL; + /* + * Filter out the non-common bits from the interrupt + * status. 
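The long comment above is the core of the get_isr() rework: the SISR snapshots are written straight back to their own registers (write-to-clear acks exactly the bits that were read), and only the PISR bits that do not originate in a SISR are written back to PISR. A toy model of that discipline, showing why acking only the snapshot avoids losing an event that fires in between (register names and widths invented here):

#include <stdint.h>
#include <stdio.h>

/* Toy W1C register: writing a 1 clears that bit, other bits are untouched. */
static uint32_t sisr0;	/* per-queue TXOK bits */

static uint32_t sisr0_read(void)      { return sisr0; }
static void     sisr0_w1c(uint32_t v) { sisr0 &= ~v; }

int main(void)
{
	uint32_t snapshot;

	sisr0 = 0x1;			/* TXOK pending for queue 0 */
	snapshot = sisr0_read();	/* handler reads the status */

	sisr0 |= 0x8;			/* queue 3 completes right after the read */

	sisr0_w1c(snapshot);		/* ack only what was actually seen */
	printf("still pending: 0x%x\n", (unsigned)sisr0_read());	/* 0x8 survives */
	return 0;
}

Blanket-clearing through the primary register instead would also wipe the queue-3 bit and the driver would never process that completion.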
+ */ + *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr; + + + /* We treat TXOK,TXDESC, TXERR and TXEOL + * the same way (schedule the tx tasklet) + * so we track them all together per queue */ + if (pisr & AR5K_ISR_TXOK) + ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0, + AR5K_SISR0_QCU_TXOK); - if (data & AR5K_ISR_TIM) + if (pisr & AR5K_ISR_TXDESC) + ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0, + AR5K_SISR0_QCU_TXDESC); + + if (pisr & AR5K_ISR_TXERR) + ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, + AR5K_SISR1_QCU_TXERR); + + if (pisr & AR5K_ISR_TXEOL) + ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, + AR5K_SISR1_QCU_TXEOL); + + /* Currently this is not much usefull since we treat + * all queues the same way if we get a TXURN (update + * tx trigger level) but we might need it later on*/ + if (pisr & AR5K_ISR_TXURN) + ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2, + AR5K_SISR2_QCU_TXURN); + + /* Misc Beacon related interrupts */ + + /* For AR5211 */ + if (pisr & AR5K_ISR_TIM) *interrupt_mask |= AR5K_INT_TIM; - if (data & AR5K_ISR_BCNMISC) { + /* For AR5212+ */ + if (pisr & AR5K_ISR_BCNMISC) { if (sisr2 & AR5K_SISR2_TIM) *interrupt_mask |= AR5K_INT_TIM; if (sisr2 & AR5K_SISR2_DTIM) @@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) *interrupt_mask |= AR5K_INT_CAB_TIMEOUT; } - if (data & AR5K_ISR_RXDOPPLER) - *interrupt_mask |= AR5K_INT_RX_DOPPLER; - if (data & AR5K_ISR_QCBRORN) { + /* Below interrupts are unlikely to happen */ + + /* HIU = Host Interface Unit (PCI etc) + * Can be one of MCABT, SSERR, DPERR from SISR2 */ + if (unlikely(pisr & (AR5K_ISR_HIUERR))) + *interrupt_mask |= AR5K_INT_FATAL; + + /*Beacon Not Ready*/ + if (unlikely(pisr & (AR5K_ISR_BNR))) + *interrupt_mask |= AR5K_INT_BNR; + + /* A queue got CBR overrun */ + if (unlikely(pisr & (AR5K_ISR_QCBRORN))) { *interrupt_mask |= AR5K_INT_QCBRORN; - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), - AR5K_SISR3_QCBRORN); + ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3, + AR5K_SISR3_QCBRORN); } - if (data & AR5K_ISR_QCBRURN) { + + /* A queue got CBR underrun */ + if (unlikely(pisr & (AR5K_ISR_QCBRURN))) { *interrupt_mask |= AR5K_INT_QCBRURN; - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), - AR5K_SISR3_QCBRURN); + ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3, + AR5K_SISR3_QCBRURN); } - if (data & AR5K_ISR_QTRIG) { + + /* A queue got triggered */ + if (unlikely(pisr & (AR5K_ISR_QTRIG))) { *interrupt_mask |= AR5K_INT_QTRIG; - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR4), - AR5K_SISR4_QTRIG); + ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4, + AR5K_SISR4_QTRIG); } - if (data & AR5K_ISR_TXOK) - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), - AR5K_SISR0_QCU_TXOK); - - if (data & AR5K_ISR_TXDESC) - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), - AR5K_SISR0_QCU_TXDESC); - - if (data & AR5K_ISR_TXERR) - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), - AR5K_SISR1_QCU_TXERR); - - if (data & AR5K_ISR_TXEOL) - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), - AR5K_SISR1_QCU_TXEOL); - - if (data & AR5K_ISR_TXURN) - ah->ah_txq_isr |= AR5K_REG_MS( - ath5k_hw_reg_read(ah, AR5K_RAC_SISR2), - AR5K_SISR2_QCU_TXURN); - } else { - if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT - | AR5K_ISR_HIUERR | AR5K_ISR_DPERR))) - *interrupt_mask |= AR5K_INT_FATAL; - - /* - * XXX: BMISS interrupts may occur after association. 
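Above, the four tx-related causes (TXOK/TXDESC/TXERR/TXEOL) are folded into a single per-queue bitmap, ah_txq_isr_txok_all, which the tx tasklet earlier in the patch walks with BIT(i). The extraction step is plain mask-then-shift in the AR5K_REG_MS style; a tiny sketch of that accumulation with field positions made up for the example:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
/* Hypothetical per-queue fields inside two secondary status words. */
#define S0_QCU_TXOK	0x000003ffu	/* queues 0..9, bits 0..9 */
#define S0_QCU_TXOK_S	0
#define S1_QCU_TXERR	0x000003ffu
#define S1_QCU_TXERR_S	0

#define MS(r, f)	(((r) & f) >> f##_S)	/* mask, then shift down */

int main(void)
{
	uint32_t sisr0 = BIT(2) << S0_QCU_TXOK_S;	/* TXOK on queue 2 */
	uint32_t sisr1 = BIT(5) << S1_QCU_TXERR_S;	/* TXERR on queue 5 */
	uint32_t txq_all = 0;
	unsigned int q;

	txq_all |= MS(sisr0, S0_QCU_TXOK);
	txq_all |= MS(sisr1, S1_QCU_TXERR);

	for (q = 0; q < 10; q++)
		if (txq_all & BIT(q))
			printf("process tx queue %u\n", q);	/* queues 2 and 5 */
	return 0;
}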
- * I found this on 5210 code but it needs testing. If this is - * true we should disable them before assoc and re-enable them - * after a successful assoc + some jiffies. - interrupt_mask &= ~AR5K_INT_BMISS; - */ + data = pisr; } /* @@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) } /** - * ath5k_hw_set_imr - Set interrupt mask - * + * ath5k_hw_set_imr() - Set interrupt mask * @ah: The &struct ath5k_hw * @new_mask: The new interrupt mask to be set * @@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) * ath5k_int bits to hw-specific bits to remove abstraction and writing * Interrupt Mask Register. */ -enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) +enum ath5k_int +ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) { enum ath5k_int old_mask, int_mask; @@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2) & AR5K_SIMR2_QCU_TXURN; + /* Fatal interrupt abstraction for 5211+ */ if (new_mask & AR5K_INT_FATAL) { int_mask |= AR5K_IMR_HIUERR; simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR); } - /*Beacon Not Ready*/ - if (new_mask & AR5K_INT_BNR) - int_mask |= AR5K_INT_BNR; - + /* Misc beacon related interrupts */ if (new_mask & AR5K_INT_TIM) int_mask |= AR5K_IMR_TIM; @@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) if (new_mask & AR5K_INT_CAB_TIMEOUT) simr2 |= AR5K_SISR2_CAB_TIMEOUT; - if (new_mask & AR5K_INT_RX_DOPPLER) - int_mask |= AR5K_IMR_RXDOPPLER; + /*Beacon Not Ready*/ + if (new_mask & AR5K_INT_BNR) + int_mask |= AR5K_INT_BNR; /* Note: Per queue interrupt masks * are set via ath5k_hw_reset_tx_queue() (qcu.c) */ @@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2); } else { + /* Fatal interrupt abstraction for 5210 */ if (new_mask & AR5K_INT_FATAL) int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT | AR5K_IMR_HIUERR | AR5K_IMR_DPERR); + /* Only common interrupts left for 5210 (no SIMRs) */ ath5k_hw_reg_write(ah, int_mask, AR5K_IMR); } @@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) \********************/ /** - * ath5k_hw_dma_init - Initialize DMA unit - * + * ath5k_hw_dma_init() - Initialize DMA unit * @ah: The &struct ath5k_hw * * Set DMA size and pre-enable interrupts @@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) * * XXX: Save/restore RXDP/TXDP registers ? */ -void ath5k_hw_dma_init(struct ath5k_hw *ah) +void +ath5k_hw_dma_init(struct ath5k_hw *ah) { /* * Set Rx/Tx DMA Configuration @@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah) } /** - * ath5k_hw_dma_stop - stop DMA unit - * + * ath5k_hw_dma_stop() - stop DMA unit * @ah: The &struct ath5k_hw * * Stop tx/rx DMA and interrupts. Returns @@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah) * stuck frames on tx queues, only a reset * can fix that. 
*/ -int ath5k_hw_dma_stop(struct ath5k_hw *ah) +int +ath5k_hw_dma_stop(struct ath5k_hw *ah) { int i, qmax, err; err = 0; diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c index 85929781191..73d3dd8a306 100644 --- a/drivers/net/wireless/ath/ath5k/gpio.c +++ b/drivers/net/wireless/ath/ath5k/gpio.c @@ -24,10 +24,33 @@ #include "reg.h" #include "debug.h" -/* - * Set led state + +/** + * DOC: GPIO/LED functions + * + * Here we control the 6 bidirectional GPIO pins provided by the hw. + * We can set a GPIO pin to be an input or an output pin on GPIO control + * register and then read or set its status from GPIO data input/output + * registers. + * + * We also control the two LED pins provided by the hw, LED_0 is our + * "power" LED and LED_1 is our "network activity" LED but many scenarios + * are available from hw. Vendors might also provide LEDs connected to the + * GPIO pins, we handle them through the LED subsystem on led.c + */ + + +/** + * ath5k_hw_set_ledstate() - Set led state + * @ah: The &struct ath5k_hw + * @state: One of AR5K_LED_* + * + * Used to set the LED blinking state. This only + * works for the LED connected to the LED_0, LED_1 pins, + * not the GPIO based. */ -void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) +void +ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) { u32 led; /*5210 has different led mode handling*/ @@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210); } -/* - * Set GPIO inputs +/** + * ath5k_hw_set_gpio_input() - Set GPIO inputs + * @ah: The &struct ath5k_hw + * @gpio: GPIO pin to set as input */ -int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) +int +ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) { if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) return 0; } -/* - * Set GPIO outputs +/** + * ath5k_hw_set_gpio_output() - Set GPIO outputs + * @ah: The &struct ath5k_hw + * @gpio: The GPIO pin to set as output */ -int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) +int +ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) { if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) return 0; } -/* - * Get GPIO state +/** + * ath5k_hw_get_gpio() - Get GPIO state + * @ah: The &struct ath5k_hw + * @gpio: The GPIO pin to read */ -u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) +u32 +ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) { if (gpio >= AR5K_NUM_GPIO) return 0xffffffff; @@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) 0x1; } -/* - * Set GPIO state +/** + * ath5k_hw_set_gpio() - Set GPIO state + * @ah: The &struct ath5k_hw + * @gpio: The GPIO pin to set + * @val: Value to set (boolean) */ -int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) +int +ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) { u32 data; @@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) return 0; } -/* - * Initialize the GPIO interrupt (RFKill switch) +/** + * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch) + * @ah: The &struct ath5k_hw + * @gpio: The GPIO pin to use + * @interrupt_level: True to generate interrupt on active pin (high) + * + * This function is used to set up the GPIO interrupt for the hw RFKill switch. 
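The GPIO helpers above all reduce to flipping a per-pin bit in a direction (control) register and reading or writing the matching bit in a data register, guarded by a range check against the pin count. A standalone sketch of that model, with an invented register layout:

#include <stdint.h>
#include <stdio.h>

#define NUM_GPIO 6

static uint32_t gpio_cr;		/* 1 = output, 0 = input (per pin) */
static uint32_t gpio_do;		/* data out */
static uint32_t gpio_di = 0x08;		/* data in: pretend pin 3 reads high */

static int gpio_set_output(unsigned int pin)
{
	if (pin >= NUM_GPIO)
		return -1;
	gpio_cr |= 1u << pin;
	return 0;
}

static int gpio_set(unsigned int pin, int val)
{
	if (pin >= NUM_GPIO)
		return -1;
	if (val)
		gpio_do |= 1u << pin;
	else
		gpio_do &= ~(1u << pin);
	return 0;
}

static uint32_t gpio_get(unsigned int pin)
{
	return pin < NUM_GPIO ? (gpio_di >> pin) & 1u : 0xffffffffu;
}

int main(void)
{
	gpio_set_output(1);
	gpio_set(1, 1);		/* drive pin 1 high */
	printf("pin 3 reads %u, do=0x%x cr=0x%x\n",
	       (unsigned)gpio_get(3), (unsigned)gpio_do, (unsigned)gpio_cr);
	return 0;
}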
+ * That switch is connected to a GPIO pin and it's number is stored on EEPROM. + * It can either open or close the circuit to indicate that we should disable + * RF/Wireless to save power (we also get that from EEPROM). */ -void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, +void +ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level) { u32 data; diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c index 1ffecc0fd3e..a1ea78e05b4 100644 --- a/drivers/net/wireless/ath/ath5k/initvals.c +++ b/drivers/net/wireless/ath/ath5k/initvals.c @@ -23,24 +23,27 @@ #include "reg.h" #include "debug.h" -/* - * Mode-independent initial register writes +/** + * struct ath5k_ini - Mode-independent initial register writes + * @ini_register: Register address + * @ini_value: Default value + * @ini_mode: 0 to write 1 to read (and clear) */ - struct ath5k_ini { u16 ini_register; u32 ini_value; enum { AR5K_INI_WRITE = 0, /* Default */ - AR5K_INI_READ = 1, /* Cleared on read */ + AR5K_INI_READ = 1, } ini_mode; }; -/* - * Mode specific initial register values +/** + * struct ath5k_ini_mode - Mode specific initial register values + * @mode_register: Register address + * @mode_value: Set of values for each enum ath5k_driver_mode */ - struct ath5k_ini_mode { u16 mode_register; u32 mode_value[3]; @@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = { /* Initial mode-specific settings for AR5211 * 5211 supports OFDM-only g (draft g) but we - * need to test it ! - */ + * need to test it ! */ static const struct ath5k_ini_mode ar5211_ini_mode[] = { { AR5K_TXCFG, - /* A/XR B G */ + /* A B G */ { 0x00000015, 0x0000001d, 0x00000015 } }, { AR5K_QUEUE_DFS_LOCAL_IFS(0), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, @@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = { { 0x00000010, 0x00000010, 0x00000010 } }, }; -/* Initial register settings for AR5212 */ +/* Initial register settings for AR5212 and newer chips */ static const struct ath5k_ini ar5212_ini_common_start[] = { { AR5K_RXDP, 0x00000000 }, { AR5K_RXCFG, 0x00000005 }, @@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { { 0x00000000, 0x00000000, 0x00000108 } }, }; -/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ +/* Initial mode-specific settings for AR5212 + RF5111 + * (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ @@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { { 0x1883800a, 0x1873800a, 0x1883800a } }, }; +/* Common for all modes */ static const struct ath5k_ini rf5111_ini_common_end[] = { { AR5K_DCU_FP, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, @@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = { { 0xa23c, 0x13c889af }, }; -/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ + +/* Initial mode-specific settings for AR5212 + RF5112 + * (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ @@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = { { 0xa23c, 0x13c889af }, }; -/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ + +/* Initial mode-specific settings for RF5413/5414 + * (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ @@ -963,7 
+971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = { { 0xa384, 0xf3307ff0 }, }; -/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */ +/* Initial mode-specific settings for RF2413/2414 + * (Written after ar5212_ini) */ /* XXX: a mode ? */ static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { { AR5K_TXCFG, @@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = { { 0xa384, 0xf3307ff0 }, }; -/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */ +/* Initial mode-specific settings for RF2425 + * (Written after ar5212_ini) */ /* XXX: a mode ? */ static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { { AR5K_TXCFG, @@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = { }; -/* - * Write initial register dump +/** + * ath5k_hw_ini_registers() - Write initial register dump common for all modes + * @ah: The &struct ath5k_hw + * @size: Dump size + * @ini_regs: The array of &struct ath5k_ini + * @skip_pcu: Skip PCU registers */ -static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, +static void +ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini *ini_regs, bool skip_pcu) { unsigned int i; @@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, } } -static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, +/** + * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump + * @ah: The &struct ath5k_hw + * @size: Dump size + * @ini_mode: The array of &struct ath5k_ini_mode + * @mode: One of enum ath5k_driver_mode + */ +static void +ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini_mode *ini_mode, u8 mode) { @@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, } -int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu) +/** + * ath5k_hw_write_initvals() - Write initial chip-specific register dump + * @ah: The &struct ath5k_hw + * @mode: One of enum ath5k_driver_mode + * @skip_pcu: Skip PCU registers + * + * Write initial chip-specific register dump, to get the chipset on a + * clean and ready-to-work state after warm reset. + */ +int +ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu) { /* * Write initial register settings diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index dfa48eb7d95..849fa060ebc 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) 0xffff); return true; } - udelay(15); + usleep_range(15, 20); } return false; diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index a7eafa3edc2..cebfd6fd31d 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -30,11 +30,47 @@ #include "reg.h" #include "debug.h" -/* +/** + * DOC: Protocol Control Unit (PCU) functions + * + * Protocol control unit is responsible to maintain various protocol + * properties before a frame is send and after a frame is received to/from + * baseband. 
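The initvals machinery documented above is table-driven: one array of {register, value} pairs that is common to all modes, plus mode-specific arrays whose value column is selected by the current mode index. A compact sketch of both write loops, with invented table contents and the register write reduced to a printf:

#include <stdint.h>
#include <stdio.h>

struct ini      { uint16_t reg; uint32_t val; };
struct ini_mode { uint16_t reg; uint32_t val[3]; };	/* A, B, G columns */

static const struct ini common_ini[] = {
	{ 0x0008, 0x00000000 },
	{ 0x0030, 0x00000005 },
};

static const struct ini_mode mode_ini[] = {
	{ 0x0038, { 0x00000015, 0x0000001d, 0x00000015 } },
};

static void reg_write(uint16_t reg, uint32_t val)
{
	printf("write 0x%04x <- 0x%08x\n", reg, (unsigned)val);
}

static void write_ini(const struct ini *t, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		reg_write(t[i].reg, t[i].val);
}

static void write_ini_mode(const struct ini_mode *t, unsigned int n, unsigned int mode)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		reg_write(t[i].reg, t[i].val[mode]);
}

int main(void)
{
	write_ini(common_ini, 2);
	write_ini_mode(mode_ini, 1, 2);	/* mode index 2 = G column */
	return 0;
}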
To be more specific, PCU handles: + * + * - Buffering of RX and TX frames (after QCU/DCUs) + * + * - Encrypting and decrypting (using the built-in engine) + * + * - Generating ACKs, RTS/CTS frames + * + * - Maintaining TSF + * + * - FCS + * + * - Updating beacon data (with TSF etc) + * + * - Generating virtual CCA + * + * - RX/Multicast filtering + * + * - BSSID filtering + * + * - Various statistics + * + * -Different operating modes: AP, STA, IBSS + * + * Note: Most of these functions can be tweaked/bypassed so you can do + * them on sw above for debugging or research. For more infos check out PCU + * registers on reg.h. + */ + +/** + * DOC: ACK rates + * * AR5212+ can use higher rates for ack transmission * based on current tx rate instead of the base rate. * It does this to better utilize channel usage. - * This is a mapping between G rates (that cover both + * There is a mapping between G rates (that cover both * CCK and OFDM) and ack rates that we use when setting * rate -> duration table. This mapping is hw-based so * don't change anything. @@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] = \*******************/ /** - * ath5k_hw_get_frame_duration - Get tx time of a frame - * + * ath5k_hw_get_frame_duration() - Get tx time of a frame * @ah: The &struct ath5k_hw * @len: Frame's length in bytes * @rate: The @struct ieee80211_rate + * @shortpre: Indicate short preample * * Calculate tx duration of a frame given it's rate and length * It extends ieee80211_generic_frame_duration for non standard * bwmodes. */ -int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, +int +ath5k_hw_get_frame_duration(struct ath5k_hw *ah, int len, struct ieee80211_rate *rate, bool shortpre) { int sifs, preamble, plcp_bits, sym_time; @@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, } /** - * ath5k_hw_get_default_slottime - Get the default slot time for current mode - * + * ath5k_hw_get_default_slottime() - Get the default slot time for current mode * @ah: The &struct ath5k_hw */ -unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) +unsigned int +ath5k_hw_get_default_slottime(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; unsigned int slot_time; @@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) } /** - * ath5k_hw_get_default_sifs - Get the default SIFS for current mode - * + * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode * @ah: The &struct ath5k_hw */ -unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah) +unsigned int +ath5k_hw_get_default_sifs(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; unsigned int sifs; @@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah) } /** - * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics) - * + * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics) * @ah: The &struct ath5k_hw * * Reads MIB counters from PCU and updates sw statistics. Is called after a * MIB interrupt, because one of these counters might have reached their maximum * and triggered the MIB interrupt, to let us read and clear the counter. * - * Is called in interrupt context! + * NOTE: Is called in interrupt context! 
*/ -void ath5k_hw_update_mib_counters(struct ath5k_hw *ah) +void +ath5k_hw_update_mib_counters(struct ath5k_hw *ah) { struct ath5k_statistics *stats = &ah->stats; @@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah) \******************/ /** - * ath5k_hw_write_rate_duration - fill rate code to duration table - * - * @ah: the &struct ath5k_hw - * @mode: one of enum ath5k_driver_mode + * ath5k_hw_write_rate_duration() - Fill rate code to duration table + * @ah: The &struct ath5k_hw * * Write the rate code to duration table upon hw reset. This is a helper for * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on @@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah) * that include all OFDM and CCK rates. * */ -static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) +static inline void +ath5k_hw_write_rate_duration(struct ath5k_hw *ah) { struct ieee80211_rate *rate; unsigned int i; @@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) } /** - * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU - * + * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ -static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) +static int +ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) { if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) <= timeout) @@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) } /** - * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU - * + * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ -static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) +static int +ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) { if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) <= timeout) @@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) \*******************/ /** - * ath5k_hw_set_lladdr - Set station id - * + * ath5k_hw_set_lladdr() - Set station id * @ah: The &struct ath5k_hw - * @mac: The card's mac address + * @mac: The card's mac address (array of octets) * * Set station id on hw using the provided mac address */ -int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) +int +ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) { struct ath_common *common = ath5k_hw_common(ah); u32 low_id, high_id; @@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) } /** - * ath5k_hw_set_bssid - Set current BSSID on hw - * + * ath5k_hw_set_bssid() - Set current BSSID on hw * @ah: The &struct ath5k_hw * * Sets the current BSSID and BSSID mask we have from the * common struct into the hardware */ -void ath5k_hw_set_bssid(struct ath5k_hw *ah) +void +ath5k_hw_set_bssid(struct ath5k_hw *ah) { struct ath_common *common = ath5k_hw_common(ah); u16 tim_offset = 0; @@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah) ath5k_hw_enable_pspoll(ah, NULL, 0); } -void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) +/** + * ath5k_hw_set_bssid_mask() - Filter out bssids we listen + * @ah: The &struct ath5k_hw + * @mask: The BSSID mask to set (array of octets) + * + * BSSID masking is a method used by AR5212 and newer hardware to inform PCU + * which bits of the interface's MAC address should be looked at when trying + * 
to decide which packets to ACK. In station mode and AP mode with a single + * BSS every bit matters since we lock to only one BSS. In AP mode with + * multiple BSSes (virtual interfaces) not every bit matters because hw must + * accept frames for all BSSes and so we tweak some bits of our mac address + * in order to have multiple BSSes. + * + * For more information check out ../hw.c of the common ath module. + */ +void +ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) { struct ath_common *common = ath5k_hw_common(ah); @@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) ath_hw_setbssidmask(common); } -/* - * Set multicast filter +/** + * ath5k_hw_set_mcast_filter() - Set multicast filter + * @ah: The &struct ath5k_hw + * @filter0: Lower 32bits of muticast filter + * @filter1: Higher 16bits of multicast filter */ -void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) +void +ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) { ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); } /** - * ath5k_hw_get_rx_filter - Get current rx filter - * + * ath5k_hw_get_rx_filter() - Get current rx filter * @ah: The &struct ath5k_hw * * Returns the RX filter by reading rx filter and @@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) * and pass to the driver. For a list of frame types * check out reg.h. */ -u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) +u32 +ath5k_hw_get_rx_filter(struct ath5k_hw *ah) { u32 data, filter = 0; @@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) } /** - * ath5k_hw_set_rx_filter - Set rx filter - * + * ath5k_hw_set_rx_filter() - Set rx filter * @ah: The &struct ath5k_hw * @filter: RX filter mask (see reg.h) * @@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) * register on 5212 and newer chips so that we have proper PHY * error reporting. 
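A minimal sketch of the BSSID-mask derivation described above for ath5k_hw_set_bssid_mask() (illustrative only; the real mask handling lives in ath_hw_setbssidmask() in the common ath module, and the helper name below is made up):

	/* Keep a mask bit set only where all of our BSSIDs agree, so the PCU
	 * will ACK frames addressed to any of the virtual interfaces. */
	static void example_calc_bssid_mask(const u8 addrs[][ETH_ALEN], int n,
					    u8 mask[ETH_ALEN])
	{
		int i, b;

		for (b = 0; b < ETH_ALEN; b++)
			mask[b] = 0xff;

		for (i = 1; i < n; i++)
			for (b = 0; b < ETH_ALEN; b++)
				mask[b] &= ~(addrs[0][b] ^ addrs[i][b]);
	}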
*/ -void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) +void +ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) { u32 data = 0; @@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) #define ATH5K_MAX_TSF_READ 10 /** - * ath5k_hw_get_tsf64 - Get the full 64bit TSF - * + * ath5k_hw_get_tsf64() - Get the full 64bit TSF * @ah: The &struct ath5k_hw * * Returns the current TSF */ -u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) +u64 +ath5k_hw_get_tsf64(struct ath5k_hw *ah) { u32 tsf_lower, tsf_upper1, tsf_upper2; int i; @@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) return ((u64)tsf_upper1 << 32) | tsf_lower; } +#undef ATH5K_MAX_TSF_READ + /** - * ath5k_hw_set_tsf64 - Set a new 64bit TSF - * + * ath5k_hw_set_tsf64() - Set a new 64bit TSF * @ah: The &struct ath5k_hw * @tsf64: The new 64bit TSF * * Sets the new TSF */ -void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) +void +ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) { ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); } /** - * ath5k_hw_reset_tsf - Force a TSF reset - * + * ath5k_hw_reset_tsf() - Force a TSF reset * @ah: The &struct ath5k_hw * * Forces a TSF reset on PCU */ -void ath5k_hw_reset_tsf(struct ath5k_hw *ah) +void +ath5k_hw_reset_tsf(struct ath5k_hw *ah) { u32 val; @@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah) ath5k_hw_reg_write(ah, val, AR5K_BEACON); } -/* - * Initialize beacon timers +/** + * ath5k_hw_init_beacon_timers() - Initialize beacon timers + * @ah: The &struct ath5k_hw + * @next_beacon: Next TBTT + * @interval: Current beacon interval + * + * This function is used to initialize beacon timers based on current + * operation mode and settings. */ -void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) +void +ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval) { u32 timer1, timer2, timer3; @@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) } /** - * ath5k_check_timer_win - Check if timer B is timer A + window - * + * ath5k_check_timer_win() - Check if timer B is timer A + window * @a: timer a (before b) * @b: timer b (after a) * @window: difference between a and b @@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval) } /** - * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct - * + * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct * @ah: The &struct ath5k_hw * @intval: beacon interval * - * This is a workaround for IBSS mode: + * This is a workaround for IBSS mode * * The need for this function arises from the fact that we have 4 separate * HW timer registers (TIMER0 - TIMER3), which are closely related to the @@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval) } /** - * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class - * + * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class * @ah: The &struct ath5k_hw * @coverage_class: IEEE 802.11 coverage class number * * Sets IFS intervals and ACK/CTS timeouts for given coverage class. 
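As a quick worked example of the formula used by ath5k_hw_set_coverage_class() below: the slot time is extended by 3 us per coverage class (per IEEE 802.11-2007 17.3.8.6), so on an OFDM channel with the usual 9 us default slot, coverage class 2 gives 9 + 3 * 2 = 15 us; the ACK and CTS timeouts are then derived from that stretched slot time as well.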
*/ -void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) +void +ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) { /* As defined by IEEE 802.11-2007 17.3.8.6 */ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class; @@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) \***************************/ /** - * ath5k_hw_start_rx_pcu - Start RX engine - * + * ath5k_hw_start_rx_pcu() - Start RX engine * @ah: The &struct ath5k_hw * * Starts RX engine on PCU so that hw can process RXed frames @@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) * * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma */ -void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) +void +ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) { AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** - * at5k_hw_stop_rx_pcu - Stop RX engine - * + * at5k_hw_stop_rx_pcu() - Stop RX engine * @ah: The &struct ath5k_hw * * Stops RX engine on PCU */ -void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) +void +ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) { AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** - * ath5k_hw_set_opmode - Set PCU operating mode - * + * ath5k_hw_set_opmode() - Set PCU operating mode * @ah: The &struct ath5k_hw - * @op_mode: &enum nl80211_iftype operating mode + * @op_mode: One of enum nl80211_iftype * * Configure PCU for the various operating modes (AP/STA etc) */ -int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) +int +ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) { struct ath_common *common = ath5k_hw_common(ah); u32 pcu_reg, beacon_reg, low_id, high_id; @@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) return 0; } -void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode, - u8 mode) +/** + * ath5k_hw_pcu_init() - Initialize PCU + * @ah: The &struct ath5k_hw + * @op_mode: One of enum nl80211_iftype + * @mode: One of enum ath5k_driver_mode + * + * This function is used to initialize PCU by setting current + * operation mode and various other settings. + */ +void +ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode) { /* Set bssid and bssid mask */ ath5k_hw_set_bssid(ah); diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 01cb72de44c..e1f8613426a 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -1,6 +1,4 @@ /* - * PHY functions - * * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com> * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> @@ -20,6 +18,10 @@ * */ +/***********************\ +* PHY related functions * +\***********************/ + #include <linux/delay.h> #include <linux/slab.h> #include <asm/unaligned.h> @@ -31,14 +33,53 @@ #include "../regd.h" +/** + * DOC: PHY related functions + * + * Here we handle the low-level functions related to baseband + * and analog frontend (RF) parts. This is by far the most complex + * part of the hw code so make sure you know what you are doing. 
+ * + * Here is a list of what this is all about: + * + * - Channel setting/switching + * + * - Automatic Gain Control (AGC) calibration + * + * - Noise Floor calibration + * + * - I/Q imbalance calibration (QAM correction) + * + * - Calibration due to thermal changes (gain_F) + * + * - Spur noise mitigation + * + * - RF/PHY initialization for the various operating modes and bwmodes + * + * - Antenna control + * + * - TX power control per channel/rate/packet type + * + * Also have in mind we never got documentation for most of these + * functions, what we have comes mostly from Atheros's code, reverse + * engineering and patent docs/presentations etc. + */ + + /******************\ * Helper functions * \******************/ -/* - * Get the PHY Chip revision +/** + * ath5k_hw_radio_revision() - Get the PHY Chip revision + * @ah: The &struct ath5k_hw + * @band: One of enum ieee80211_band + * + * Returns the revision number of a 2GHz, 5GHz or single chip + * radio. */ -u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) +u16 +ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) { unsigned int i; u32 srev; @@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) return 0; } - mdelay(2); + usleep_range(2000, 2500); /* ...wait until PHY is ready and read the selected radio revision */ ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34)); @@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) return ret; } -/* - * Check if a channel is supported +/** + * ath5k_channel_ok() - Check if a channel is supported by the hw + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * Note: We don't do any regulatory domain checks here, it's just + * a sanity check. */ -bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) +bool +ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u16 freq = channel->center_freq; @@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) return false; } -bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, +/** + * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + */ +bool +ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u8 refclk_freq; @@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, return false; } -/* - * Used to modify RF Banks before writing them to AR5K_RF_BUFFER +/** + * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer + * @ah: The &struct ath5k_hw + * @rf_regs: The struct ath5k_rf_reg + * @val: New value + * @reg_id: RF register ID + * @set: Indicate we need to swap data + * + * This is an internal function used to modify RF Banks before + * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more + * infos. 
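Very loosely, the read-modify-write that ath5k_hw_rfb_op() performs on a bank can be pictured like the toy helper below (an assumption-heavy sketch: real RF fields may span word boundaries and be bit-swapped, which this ignores, and the name is made up):

	/* Place 'val' into a bit field of an in-memory RF bank copy before
	 * the whole bank is flushed to AR5K_RF_BUFFER. */
	static void example_set_rf_field(u32 *bank, unsigned int first_bit,
					 unsigned int width, u32 val)
	{
		u32 mask = ((1U << width) - 1) << (first_bit % 32);

		bank[first_bit / 32] &= ~mask;
		bank[first_bit / 32] |= (val << (first_bit % 32)) & mask;
	}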
*/ -static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, - const struct ath5k_rf_reg *rf_regs, +static unsigned int +ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs, u32 val, u8 reg_id, bool set) { const struct ath5k_rf_reg *rfreg = NULL; @@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, } /** - * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 - * + * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212 * @ah: the &struct ath5k_hw * @channel: the currently set channel upon reset * @@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, * mantissa and provide these values on hw. * * For more infos i think this patent is related - * http://www.freepatentsonline.com/7184495.html + * "http://www.freepatentsonline.com/7184495.html" */ -static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, - struct ieee80211_channel *channel) +static inline int +ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, + struct ieee80211_channel *channel) { /* Get exponent and mantissa and set it */ u32 coef_scaled, coef_exp, coef_man, @@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, return 0; } +/** + * ath5k_hw_phy_disable() - Disable PHY + * @ah: The &struct ath5k_hw + */ int ath5k_hw_phy_disable(struct ath5k_hw *ah) { /*Just a try M.F.*/ @@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) return 0; } -/* - * Wait for synth to settle +/** + * ath5k_hw_wait_for_synth() - Wait for synth to settle + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel */ -static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, +static void +ath5k_hw_wait_for_synth(struct ath5k_hw *ah, struct ieee80211_channel *channel) { /* @@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, delay = delay << 2; /* XXX: /2 on turbo ? Let's be safe * for now */ - udelay(100 + delay); + usleep_range(100 + delay, 100 + (2 * delay)); } else { - mdelay(1); + usleep_range(1000, 1500); } } @@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, * RF Gain optimization * \**********************/ -/* +/** + * DOC: RF Gain optimization + * * This code is used to optimize RF gain on different environments * (temperature mostly) based on feedback from a power detector. * @@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, * no gain optimization ladder-. * * For more infos check out this patent doc - * http://www.freepatentsonline.com/7400691.html + * "http://www.freepatentsonline.com/7400691.html" * * This paper describes power drops as seen on the receiver due to * probe packets - * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues - * %20of%20Power%20Control.pdf + * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues + * %20of%20Power%20Control.pdf" * * And this is the MadWiFi bug entry related to the above - * http://madwifi-project.org/ticket/1659 + * "http://madwifi-project.org/ticket/1659" * with various measurements and diagrams - * - * TODO: Deal with power drops due to probes by setting an appropriate - * tx power on the probe packets ! Make this part of the calibration process. 
*/ -/* Initialize ah_gain during attach */ +/** + * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach + * @ah: The &struct ath5k_hw + */ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah) { /* Initialize the gain optimization values */ @@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah) return 0; } -/* Schedule a gain probe check on the next transmitted packet. +/** + * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet + * @ah: The &struct ath5k_hw + * + * Schedules a gain probe check on the next transmitted packet. * That means our next packet is going to be sent with lower * tx power and a Peak to Average Power Detector (PAPD) will try * to measure the gain. * - * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc) + * TODO: Force a tx packet (bypassing PCU arbitrator etc) * just after we enable the probe so that we don't mess with - * standard traffic ? Maybe it's time to use sw interrupts and - * a probe tasklet !!! + * standard traffic. */ -static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) +static void +ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) { /* Skip if gain calibration is inactive or @@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) } -/* Calculate gain_F measurement correction - * based on the current step for RF5112 rev. 2 */ -static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) +/** + * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction + * @ah: The &struct ath5k_hw + * + * Calculate Gain_F measurement correction + * based on the current step for RF5112 rev. 2 + */ +static u32 +ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) { u32 mix, step; u32 *rf; @@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) return ah->ah_gain.g_f_corr; } -/* Check if current gain_F measurement is in the range of our +/** + * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector + * @ah: The &struct ath5k_hw + * + * Check if current gain_F measurement is in the range of our * power detector windows. If we get a measurement outside range * we know it's not accurate (detectors can't measure anything outside - * their detection window) so we must ignore it */ -static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) + * their detection window) so we must ignore it. + * + * Returns true if readback was O.K. or false on failure + */ +static bool +ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) { const struct ath5k_rf_reg *rf_regs; u32 step, mix_ovr, level[4]; @@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) ah->ah_gain.g_current <= level[3]); } -/* Perform gain_F adjustment by choosing the right set - * of parameters from RF gain optimization ladder */ -static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) +/** + * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment + * @ah: The &struct ath5k_hw + * + * Choose the right target gain based on current gain + * and RF gain optimization ladder + */ +static s8 +ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) { const struct ath5k_gain_opt *go; const struct ath5k_gain_opt_step *g_step; @@ -572,13 +667,18 @@ done: return ret; } -/* Main callback for thermal RF gain calibration engine +/** + * ath5k_hw_gainf_calibrate() - Do a gain_F calibration + * @ah: The &struct ath5k_hw + * + * Main callback for thermal RF gain calibration engine * Check for a new gain reading and schedule an adjustment * if needed. 
* - * TODO: Use sw interrupt to schedule reset if gain_F needs - * adjustment */ -enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) + * Returns one of enum ath5k_rfgain codes + */ +enum ath5k_rfgain +ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) { u32 data, type; struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; @@ -638,10 +738,18 @@ done: return ah->ah_gain.g_state; } -/* Write initial RF gain table to set the RF sensitivity - * this one works on all RF chips and has nothing to do - * with gain_F calibration */ -static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) +/** + * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw + * @ah: The &struct ath5k_hw + * @band: One of enum ieee80211_band + * + * Write initial RF gain table to set the RF sensitivity. + * + * NOTE: This one works on all RF chips and has nothing to do + * with Gain_F calibration + */ +static int +ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) { const struct ath5k_ini_rfgain *ath5k_rfg; unsigned int i, size, index; @@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) } - /********************\ * RF Registers setup * \********************/ -/* - * Setup RF registers by writing RF buffer on hw +/** + * ath5k_hw_rfregs_init() - Initialize RF register settings + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * @mode: One of enum ath5k_driver_mode + * + * Setup RF registers by writing RF buffer on hw. For + * more infos on this, check out rfbuffer.h */ -static int ath5k_hw_rfregs_init(struct ath5k_hw *ah, - struct ieee80211_channel *channel, unsigned int mode) +static int +ath5k_hw_rfregs_init(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + unsigned int mode) { const struct ath5k_rf_reg *rf_regs; const struct ath5k_ini_rfbuffer *ini_rfb; @@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah, PHY/RF channel functions \**************************/ -/* - * Conversion needed for RF5110 +/** + * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110 + * @channel: The &struct ieee80211_channel + * + * Map channel frequency to IEEE channel number and convert it + * to an internal channel value used by the RF5110 chipset. */ -static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) +static u32 +ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) { u32 athchan; - /* - * Convert IEEE channel/MHz to an internal channel value used - * by the AR5210 chipset. This has not been verified with - * newer chipsets like the AR5212A who have a completely - * different RF/PHY part. 
- */ athchan = (ath5k_hw_bitswap( (ieee80211_frequency_to_channel( channel->center_freq) - 24) / 2, 5) @@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) return athchan; } -/* - * Set channel on RF5110 +/** + * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110 + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel */ -static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah, +static int +ath5k_hw_rf5110_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 data; @@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah, data = ath5k_hw_rf5110_chan2athchan(channel); ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER); ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0); - mdelay(1); + usleep_range(1000, 1500); return 0; } -/* - * Conversion needed for 5111 +/** + * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111 + * @ieee: IEEE channel number + * @athchan: The &struct ath5k_athchan_2ghz + * + * In order to enable the RF2111 frequency converter on RF5111/2111 setups + * we need to add some offsets and extra flags to the data values we pass + * on to the PHY. So for every 2GHz channel this function gets called + * to do the conversion. */ -static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee, +static int +ath5k_hw_rf5111_chan2athchan(unsigned int ieee, struct ath5k_athchan_2ghz *athchan) { int channel; @@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee, return 0; } -/* - * Set channel on 5111 +/** + * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111 + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel */ -static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah, +static int +ath5k_hw_rf5111_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { struct ath5k_athchan_2ghz ath5k_channel_2ghz; @@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah, return 0; } -/* - * Set channel on 5112 and newer +/** + * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * On RF5112/2112 and newer we don't need to do any conversion. + * We pass the frequency value after a few modifications to the + * chip directly. + * + * NOTE: Make sure channel frequency given is within our range or else + * we might damage the chip ! Use ath5k_channel_ok before calling this one. */ -static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah, +static int +ath5k_hw_rf5112_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 data, data0, data1, data2; @@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah, data = data0 = data1 = data2 = 0; c = channel->center_freq; + /* My guess based on code: + * 2GHz RF has 2 synth modes, one with a Local Oscillator + * at 2224Hz and one with a LO at 2192Hz. IF is 1520Hz + * (3040/2). data0 is used to set the PLL divider and data1 + * selects synth mode. 
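Working the 2 GHz branch of the code below through by hand: for channel 1, c = 2412 MHz, (2412 - 2224) % 5 = 3 so the first case is skipped, while (2412 - 2192) % 5 = 0, which gives data0 = ((2 * (2412 - 672)) - 3040) / 10 = (3480 - 3040) / 10 = 44 and data1 = 0; data0 is then shifted left by two, masked to 8 bits and bit-swapped, as the code shows.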
*/ if (c < 4800) { + /* Channel 14 and all frequencies with 2Hz spacing + * below/above (non-standard channels) */ if (!((c - 2224) % 5)) { + /* Same as (c - 2224) / 5 */ data0 = ((2 * (c - 704)) - 3040) / 10; data1 = 1; + /* Channel 1 and all frequencies with 5Hz spacing + * below/above (standard channels without channel 14) */ } else if (!((c - 2192) % 5)) { + /* Same as (c - 2192) / 5 */ data0 = ((2 * (c - 672)) - 3040) / 10; data1 = 0; } else return -EINVAL; data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); + /* This is more complex, we have a single synthesizer with + * 4 reference clock settings (?) based on frequency spacing + * and set using data2. LO is at 4800Hz and data0 is again used + * to set some divider. + * + * NOTE: There is an old atheros presentation at Stanford + * that mentions a method called dual direct conversion + * with 1GHz sliding IF for RF5110. Maybe that's what we + * have here, or an updated version. */ } else if ((c % 5) != 2 || c > 5435) { if (!(c % 20) && c >= 5120) { data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); @@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah, return 0; } -/* - * Set the channel on the RF2425 +/** + * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425 + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * AR2425/2417 have a different 2GHz RF so code changes + * a little bit from RF5112. */ -static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah, +static int +ath5k_hw_rf2425_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 data, data0, data2; @@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah, return 0; } -/* - * Set a channel on the radio chip +/** + * ath5k_hw_channel() - Set a channel on the radio chip + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * This is the main function called to set a channel on the + * radio chip based on the radio chip version. */ -static int ath5k_hw_channel(struct ath5k_hw *ah, +static int +ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { int ret; @@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, return 0; } + /*****************\ PHY calibration \*****************/ -static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) +/** + * DOC: PHY Calibration routines + * + * Noise floor calibration: When we tell the hardware to + * perform a noise floor calibration by setting the + * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically + * sample-and-hold the minimum noise level seen at the antennas. + * This value is then stored in a ring buffer of recently measured + * noise floor values so we have a moving window of the last few + * samples. The median of the values in the history is then loaded + * into the hardware for its own use for RSSI and CCA measurements. + * This type of calibration doesn't interfere with traffic. + * + * AGC calibration: When we tell the hardware to perform + * an AGC (Automatic Gain Control) calibration by setting the + * AR5K_PHY_AGCCTL_CAL, hw disconnects the antennas and does + * a calibration on the DC offsets of ADCs. During this period + * rx/tx gets disabled so we have to deal with it on the driver + * part. + * + * I/Q calibration: When we tell the hardware to perform + * an I/Q calibration, it tries to correct I/Q imbalance and + * fix QAM constellation by sampling data from rxed frames. + * It doesn't interfere with traffic. 
+ * + * For more infos on AGC and I/Q calibration check out patent doc + * #03/094463. + */ + +/** + * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw + * @ah: The &struct ath5k_hw + */ +static s32 +ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) { s32 val; @@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8); } -void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) +/** + * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer + * @ah: The &struct ath5k_hw + */ +void +ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) { int i; @@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE; } +/** + * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer + * @ah: The &struct ath5k_hw + * @noise_floor: The NF we got from hw + */ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) { struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist; @@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) hist->nfval[hist->index] = noise_floor; } -static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) +/** + * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer + * @ah: The &struct ath5k_hw + */ +static s16 +ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) { s16 sort[ATH5K_NF_CAL_HIST_MAX]; s16 tmp; @@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; } -/* - * When we tell the hardware to perform a noise floor calibration - * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically - * sample-and-hold the minimum noise level seen at the antennas. - * This value is then stored in a ring buffer of recently measured - * noise floor values so we have a moving window of the last few - * samples. +/** + * ath5k_hw_update_noise_floor() - Update NF on hardware + * @ah: The &struct ath5k_hw * - * The median of the values in the history is then loaded into the - * hardware for its own use for RSSI and CCA measurements. + * This is the main function we call to perform a NF calibration, + * it reads NF from hardware, calculates the median and updates + * NF on hw. 
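The history/median mechanism described above reduces to something like this standalone sketch (depth and names here are illustrative; the driver's version works on ATH5K_NF_CAL_HIST_MAX entries):

	#define NF_HIST_MAX 8	/* illustrative ring-buffer depth */

	/* Return the median of the last NF_HIST_MAX noise-floor readings. */
	static s16 example_median_nf(const s16 hist[NF_HIST_MAX])
	{
		s16 sort[NF_HIST_MAX];
		s16 tmp;
		int i, j;

		memcpy(sort, hist, sizeof(sort));

		/* simple insertion sort, the buffer is tiny */
		for (i = 1; i < NF_HIST_MAX; i++) {
			tmp = sort[i];
			for (j = i - 1; j >= 0 && sort[j] > tmp; j--)
				sort[j + 1] = sort[j];
			sort[j + 1] = tmp;
		}

		return sort[(NF_HIST_MAX - 1) / 2];
	}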
*/ -void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) +void +ath5k_hw_update_noise_floor(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 val; @@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) return; } + ah->ah_cal_mask |= AR5K_CALIBRATION_NF; + ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel); /* completed NF calibration, test threshold */ @@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) ah->ah_noise_floor = nf; + ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF; + ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "noise floor calibrated: %d\n", nf); } -/* - * Perform a PHY calibration on RF5110 - * -Fix BPSK/QAM Constellation (I/Q correction) +/** + * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110 + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110 */ -static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, +static int +ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 phy_sig, phy_agc, phy_sat, beacon; int ret; + if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) + return 0; + /* * Disable beacons and RX/TX queues, wait */ @@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210); ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210); - mdelay(2); + usleep_range(2000, 2500); /* * Set the channel (with AGC turned off) @@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, * Activate PHY and wait */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); - mdelay(1); + usleep_range(1000, 1500); AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); @@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG); AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); - mdelay(1); + usleep_range(1000, 1500); /* * Enable calibration and wait until completion @@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, return 0; } -/* - * Perform I/Q calibration on RF5111/5112 and newer chips +/** + * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer + * @ah: The &struct ath5k_hw */ static int ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) @@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; int i; - if (!ah->ah_calibration || - ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) - return 0; + /* Skip if I/Q calibration is not needed or if it's still running */ + if (!ah->ah_iq_cal_needed) + return -EINVAL; + else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) { + ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE, + "I/Q calibration still running"); + return -EBUSY; + } /* Calibration has finished, get the results and re-run */ - /* work around empty results which can apparently happen on 5212 */ + + /* Work around for empty results which can apparently happen on 5212: + * Read registers up to 10 times until we get both i_pr and q_pwr */ for (i = 0; i <= 10; i++) { iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR); i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I); @@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) else q_coffd = q_pwr >> 7; - /* protect against divide by 0 and loss of sign bits 
*/ + /* In case i_coffd became zero, cancel calibration + * not only it's too small, it'll also result a divide + * by zero later on. */ if (i_coffd == 0 || q_coffd < 2) - return 0; + return -ECANCELED; + + /* Protect against loss of sign bits */ i_coff = (-iq_corr) / i_coffd; i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ @@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) return 0; } -/* - * Perform a PHY calibration +/** + * ath5k_hw_phy_calibrate() - Perform a PHY calibration + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * The main function we call from above to perform + * a short or full PHY calibration based on RF chip + * and current channel */ -int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, +int +ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel) { int ret; @@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, return ath5k_hw_rf5110_calibrate(ah, channel); ret = ath5k_hw_rf511x_iq_calibrate(ah); + if (ret) { + ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE, + "No I/Q correction performed (%uMHz)\n", + channel->center_freq); + + /* Happens all the time if there is not much + * traffic, consider it normal behaviour. */ + ret = 0; + } + + /* On full calibration do an AGC calibration and + * request a PAPD probe for gainf calibration if + * needed */ + if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) { - if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) && - (channel->hw_value != AR5K_MODE_11B)) - ath5k_hw_request_rfgain_probe(ah); + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_CAL); + + ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF, + 0, false); + if (ret) { + ATH5K_ERR(ah, + "gain calibration timeout (%uMHz)\n", + channel->center_freq); + } + + if ((ah->ah_radio == AR5K_RF5111 || + ah->ah_radio == AR5K_RF5112) + && (channel->hw_value != AR5K_MODE_11B)) + ath5k_hw_request_rfgain_probe(ah); + } + + /* Update noise floor + * XXX: Only do this after AGC calibration */ + if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF)) + ath5k_hw_update_noise_floor(ah); return ret; } @@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, * Spur mitigation functions * \***************************/ +/** + * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * This function gets called during PHY initialization to + * configure the spur filter for the given channel. Spur is noise + * generated due to "reflection" effects, for more information on this + * method check out patent US7643810 + */ static void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, struct ieee80211_channel *channel) @@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, * Antenna control * \*****************/ -static void /*TODO:Boundary check*/ +/** + * DOC: Antenna control + * + * Hw supports up to 14 antennas ! I haven't found any card that implements + * that. The maximum number of antennas I've seen is up to 4 (2 for 2GHz and 2 + * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX) + * omnidirectional or sectorial and antennas 3-14 sectorial (or directional). + * + * We can have a single antenna for RX and multiple antennas for TX. 
+ * RX antenna is our "default" antenna (usually antenna 1) set on + * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor + * (0 for automatic selection, 1 - 14 antenna number). + * + * We can let hw do all the work doing fast antenna diversity for both + * tx and rx or we can do things manually. Here are the options we have + * (all are bits of STA_ID1 register): + * + * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX + * control descriptor, use the default antenna to transmit or else use the last + * antenna on which we received an ACK. + * + * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to + * the antenna on which we got the ACK for that frame. + * + * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the + * one on the TX descriptor. + * + * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames + * (ACKs etc), or else use current antenna (the one we just used for TX). + * + * Using the above we support the following scenarios: + * + * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically + * + * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present + * + * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present + * + * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap + * + * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc + * + * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc + * + * AR5K_ANTMODE_DEBUG Debug mode -A -> Rx, B-> Tx- + * + * Also note that when setting antenna to F on tx descriptor card inverts + * current tx antenna. + */ + +/** + * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer + * @ah: The &struct ath5k_hw + * @ant: Antenna number + */ +static void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) { if (ah->ah_version != AR5K_AR5210) ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); } -/* - * Enable/disable fast rx antenna diversity +/** + * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity + * @ah: The &struct ath5k_hw + * @ee_mode: One of enum ath5k_driver_mode + * @enable: True to enable, false to disable */ static void ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) @@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) } } +/** + * ath5k_hw_set_antenna_switch() - Set up antenna switch table + * @ah: The &struct ath5k_hw + * @ee_mode: One of enum ath5k_driver_mode + * + * Switch table comes from EEPROM and includes information on controlling + * the 2 antenna RX attenuators + */ void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode) { @@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode) AR5K_PHY_ANT_SWITCH_TABLE_1); } -/* - * Set antenna operating mode +/** + * ath5k_hw_set_antenna_mode() - Set antenna operating mode + * @ah: The &struct ath5k_hw + * @ant_mode: One of enum ath5k_ant_mode */ void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) @@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) * Helper functions */ -/* - * Do linear interpolation between two given (x, y) points +/** + * ath5k_get_interpolated_value() - Get interpolated Y val between two points + * @target: X value of the middle point + * @x_left: X value of the left point + * @x_right: X value of the right point + * @y_left: Y value of the left point + * @y_right: Y value of the right point */ static s16 
ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, @@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, return result; } -/* - * Find vertical boundary (min pwr) for the linear PCDAC curve. +/** + * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the + * linear PCDAC curve + * @stepL: Left array with y values (pcdac steps) + * @stepR: Right array with y values (pcdac steps) + * @pwrL: Left array with x values (power steps) + * @pwrR: Right array with x values (power steps) * * Since we have the top of the curve and we draw the line below * until we reach 1 (1 pcdac step) we need to know which point - * (x value) that is so that we don't go below y axis and have negative - * pcdac values when creating the curve, or fill the table with zeroes. + * (x value) that is so that we don't go below x axis and have negative + * pcdac values when creating the curve, or fill the table with zeros. */ static s16 ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, @@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, return max(min_pwrL, min_pwrR); } -/* +/** + * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve + * @pmin: Minimum power value (xmin) + * @pmax: Maximum power value (xmax) + * @pwr: Array of power steps (x values) + * @vpd: Array of matching PCDAC/PDADC steps (y values) + * @num_points: Number of provided points + * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values) + * @type: One of enum ath5k_powertable_type (eeprom.h) + * * Interpolate (pwr,vpd) points to create a Power to PDADC or a * Power to PCDAC curve. * @@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax, } } -/* +/** + * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers + * for a given channel. + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier + * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier + * * Get the surrounding per-channel power calibration piers * for a given frequency so that we can interpolate between * them and come up with an appropriate dataset for our current @@ -2289,11 +2674,17 @@ done: *pcinfo_r = &pcinfo[idx_r]; } -/* +/** + * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power + * calibration data + * @ah: The &struct ath5k_hw *ah, + * @channel: The &struct ieee80211_channel + * @rates: The &struct ath5k_rate_pcal_info to fill + * * Get the surrounding per-rate power calibration data * for a given frequency and interpolate between power * values to set max target power supported by hw for - * each rate. + * each rate on this frequency. */ static void ath5k_get_rate_pcal_data(struct ath5k_hw *ah, @@ -2381,7 +2772,11 @@ done: rpinfo[idx_r].target_power_54); } -/* +/** + * ath5k_get_max_ctl_power() - Get max edge power for a given frequency + * @ah: the &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * * Get the max edge power for this channel if * we have such data from EEPROM's Conformance Test * Limits (CTL), and limit max power if needed. @@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah, * Power to PCDAC table functions */ -/* - * Fill Power to PCDAC table on RF5111 +/** + * DOC: Power to PCDAC table functions + * + * For RF5111 we have an XPD -eXternal Power Detector- curve + * for each calibrated channel. 
Each curve has 0,5dB Power steps + * on x axis and PCDAC steps (offsets) on y axis and looks like an + * exponential function. To recreate the curve we read 11 points + * from eeprom (eeprom.c) and interpolate here. + * + * For RF5112 we have 4 XPD -eXternal Power Detector- curves + * for each calibrated channel on 0, -6, -12 and -18dBm but we only + * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB + * power steps on x axis and PCDAC steps on y axis and looks like a + * linear function. To recreate the curve and pass the power values + * on hw, we get 4 points for xpd 0 (lower gain -> max power) + * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c) + * and interpolate here. + * + * For a given channel we get the calibrated points (piers) for it or + * -if we don't have calibration data for this specific channel- from the + * available surrounding channels we have calibration data for, after we do a + * linear interpolation between them. Then since we have our calibrated points + * for this channel, we do again a linear interpolation between them to get the + * whole curve. + * + * We finally write the Y values of the curve(s) (the PCDAC values) on hw + */ + +/** + * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111 + * @ah: The &struct ath5k_hw + * @table_min: Minimum power (x min) + * @table_max: Maximum power (x max) * * No further processing is needed for RF5111, the only thing we have to * do is fill the values below and above calibration range since eeprom data @@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min, } -/* - * Combine available XPD Curves and fill Linear Power to PCDAC table - * on RF5112 +/** + * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves + * @ah: The &struct ath5k_hw + * @table_min: Minimum power (x min) + * @table_max: Maximum power (x max) + * @pdcurves: Number of pd curves * + * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112 * RFX112 can have up to 2 curves (one for low txpower range and one for * higher txpower range). We need to put them both on pcdac_out and place * them in the correct location. In case we only have one curve available @@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min, } } -/* Write PCDAC values on hw */ +/** + * ath5k_write_pcdac_table() - Write the PCDAC values on hw + * @ah: The &struct ath5k_hw + */ static void ath5k_write_pcdac_table(struct ath5k_hw *ah) { @@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah) * Power to PDADC table functions */ -/* - * Set the gain boundaries and create final Power to PDADC table +/** + * DOC: Power to PDADC table functions + * + * For RF2413 and later we have a Power to PDADC table (Power Detector) + * instead of a PCDAC (Power Control) and 4 pd gain curves for each + * calibrated channel. Each curve has power on x axis in 0.5 db steps and + * PDADC steps on y axis and looks like an exponential function like the + * RF5111 curve. + * + * To recreate the curves we read the points from eeprom (eeprom.c) + * and interpolate here. Note that in most cases only 2 (higher and lower) + * curves are used (like RF5112) but vendors have the opportunity to include + * all 4 curves on eeprom. The final curve (higher power) has an extra + * point for better accuracy like RF5112. 
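The "interpolate between calibrated points" step that all of these table builders rely on is plain linear interpolation; a minimal standalone version (not the driver's exact scaling and rounding) would be:

	/* y value at 'target' on the line through (x_left, y_left) and
	 * (x_right, y_right) */
	static s16 example_interpolate(s16 target, s16 x_left, s16 x_right,
				       s16 y_left, s16 y_right)
	{
		if (x_right == x_left)	/* degenerate pier pair */
			return y_left;

		return y_left + ((target - x_left) * (y_right - y_left)) /
				(x_right - x_left);
	}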
* + * The process is similar to what we do above for RF5111/5112 + */ + +/** + * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves + * @ah: The &struct ath5k_hw + * @pwr_min: Minimum power (x min) + * @pwr_max: Maximum power (x max) + * @pdcurves: Number of available curves + * + * Combine the various pd curves and create the final Power to PDADC table * We can have up to 4 pd curves, we need to do a similar process * as we do for RF5112. This time we don't have an edge_flag but we * set the gain boundaries on a separate register. @@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah, } -/* Write PDADC values on hw */ +/** + * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw + * @ah: The &struct ath5k_hw + * @ee_mode: One of enum ath5k_driver_mode + */ static void ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode) { @@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode) * Common code for PCDAC/PDADC tables */ -/* +/** + * ath5k_setup_channel_powertable() - Set up power table for this channel + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * @ee_mode: One of enum ath5k_driver_mode + * @type: One of enum ath5k_powertable_type (eeprom.h) + * * This is the main function that uses all of the above * to set PCDAC/PDADC table on hw for the current channel. * This table is used for tx power calibration on the baseband, @@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah, return 0; } -/* Write power table for current channel to hw */ +/** + * ath5k_write_channel_powertable() - Set power table for current channel on hw + * @ah: The &struct ath5k_hw + * @ee_mode: One of enum ath5k_driver_mode + * @type: One of enum ath5k_powertable_type (eeprom.h) + */ static void ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type) { @@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type) ath5k_write_pcdac_table(ah); } -/* - * Per-rate tx power setting + +/** + * DOC: Per-rate tx power setting * - * This is the code that sets the desired tx power (below + * This is the code that sets the desired tx power limit (below * maximum) on hw for each rate (we also have TPC that sets - * power per packet). We do that by providing an index on the - * PCDAC/PDADC table we set up. - */ - -/* - * Set rate power table + * power per packet type). We do that by providing an index on the + * PCDAC/PDADC table we set up above, for each rate. * * For now we only limit txpower based on maximum tx power - * supported by hw (what's inside rate_info). We need to limit - * this even more, based on regulatory domain etc. + * supported by hw (what's inside rate_info) + conformance test + * limits. We need to limit this even more, based on regulatory domain + * etc to be safe. Normally this is done from above so we don't care + * here, all we care is that the tx power we set will be O.K. + * for the hw (e.g. won't create noise on PA etc). 
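A rough sketch of that per-rate limiting (indices are 0.5 dB steps into the PCDAC/PDADC table set up above; the helper and array names are made up, and the driver's ath5k_setup_rate_powertable() handles more cases):

	/* Clamp the requested power limit against each rate's own maximum
	 * and keep the resulting 0.5 dB table index per rate. */
	static void example_fill_rate_power(u8 out[16], u8 requested,
					    const u8 rate_max[16])
	{
		int i;

		for (i = 0; i < 16; i++)
			out[i] = min(requested, rate_max[i]);
	}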
* - * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps) - * and is indexed as follows: + * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps - + * x values) and is indexed as follows: * rates[0] - rates[7] -> OFDM rates * rates[8] - rates[14] -> CCK rates * rates[15] -> XR rates (they all have the same power) */ + +/** + * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power + * @ah: The &struct ath5k_hw + * @max_pwr: The maximum tx power requested in 0.5dB steps + * @rate_info: The &struct ath5k_rate_pcal_info to fill + * @ee_mode: One of enum ath5k_driver_mode + */ static void ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, struct ath5k_rate_pcal_info *rate_info, @@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, } -/* - * Set transmission power +/** + * ath5k_hw_txpower() - Set transmission power limit for a given channel + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * @txpower: Requested tx power in 0.5dB steps + * + * Combines all of the above to set the requested tx power limit + * on hw. */ static int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, @@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, return 0; } -int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) +/** + * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel + * @ah: The &struct ath5k_hw + * @txpower: The requested tx power limit in 0.5dB steps + * + * This function provides access to ath5k_hw_txpower to the driver in + * case the user or an application changes it while PHY is running. + */ +int +ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) { ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER, "changing txpower to %d\n", txpower); @@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower); } + /*************\ Init function \*************/ -int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, +/** + * ath5k_hw_phy_init() - Initialize PHY + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * @mode: One of enum ath5k_driver_mode + * @fast: Try a fast channel switch instead + * + * This is the main function used during reset to initialize PHY + * or do a fast channel change if possible. + * + * NOTE: Do not call this one from the driver, it assumes PHY is in a + * warm reset state!
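Note the contrast between the two entry points documented above: ath5k_hw_phy_init() is only safe from within the warm-reset path, while ath5k_hw_set_txpower_limit() is the runtime hook for power changes. Because the limit is expressed in 0.5dB steps, a caller that starts from a dBm value would presumably double it first; a hedged usage sketch:

/* Hypothetical caller: cap tx power on the current channel to 'dbm' dBm.
 * The hw interface counts in half-dB steps, hence the doubling. */
static int example_set_txpower_dbm(struct ath5k_hw *ah, u8 dbm)
{
	return ath5k_hw_set_txpower_limit(ah, dbm * 2);
}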
+ */ +int +ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 mode, bool fast) { struct ieee80211_channel *curr_channel; @@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; - mdelay(1); + usleep_range(1000, 1500); /* * Write RF buffer @@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, } } else if (ah->ah_version == AR5K_AR5210) { - mdelay(1); + usleep_range(1000, 1500); /* Disable phy and wait */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); - mdelay(1); + usleep_range(1000, 1500); } /* Set channel on PHY */ @@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, for (i = 0; i <= 20; i++) { if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) break; - udelay(200); + usleep_range(200, 250); } ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); @@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, /* At the same time start I/Q calibration for QAM constellation * -no need for CCK- */ - ah->ah_calibration = false; + ah->ah_iq_cal_needed = false; if (!(mode == AR5K_MODE_11B)) { - ah->ah_calibration = true; + ah->ah_iq_cal_needed = true; AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index 776654228ea..30b50f93417 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -17,23 +17,48 @@ */ /********************************************\ -Queue Control Unit, DFS Control Unit Functions +Queue Control Unit, DCF Control Unit Functions \********************************************/ #include "ath5k.h" #include "reg.h" #include "debug.h" +#include <linux/log2.h> + +/** + * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions + * + * Here we set up parameters for the 12 available TX queues. Note that + * on the various registers we can usually only map the first 10 of them so + * basically we have 10 queues to play with. Each queue has a matching + * QCU that controls when the queue will get triggered and multiple QCUs + * can be mapped to a single DCU that controls the various DFS parameters + * for the various queues. In our setup we have a 1:1 mapping between QCUs + * and DCUs allowing us to have different DFS settings for each queue. + * + * When a frame goes into a TX queue, QCU decides when it'll trigger a + * transmission based on various criteria (such as how much data we have inside + * its buffer or -if it's a beacon queue- if it's time to fire up the queue + * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler + * (arbitrator) decides the priority of each QCU based on its configuration + * (e.g. beacons are always transmitted when they leave DCU bypassing all other + * frames from other queues waiting to be transmitted). After a frame leaves + * the DCU it goes to PCU for further processing and then to PHY for + * the actual transmission.
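To make the queue model described above concrete: before tearing a queue down, a driver typically waits for its pending frames to drain. A hypothetical snippet built only on the helpers documented just below (ath5k_hw_num_tx_pending() and ath5k_hw_release_tx_queue()) could look like this; it is a sketch, not code from this patch:

/* Wait (bounded) for a tx queue to drain, then mark it inactive.
 * 'queue' is one of enum ath5k_tx_queue_id. */
static void example_drain_and_release(struct ath5k_hw *ah, unsigned int queue)
{
	int retries = 100;

	while (ath5k_hw_num_tx_pending(ah, queue) && retries--)
		usleep_range(100, 150);

	ath5k_hw_release_tx_queue(ah, queue);
}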
+ */ /******************\ * Helper functions * \******************/ -/* - * Get number of pending frames - * for a specific queue [5211+] +/** + * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue + * @ah: The &struct ath5k_hw + * @queue: One of enum ath5k_tx_queue_id */ -u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) +u32 +ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) { u32 pending; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); @@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) return pending; } -/* - * Set a transmit queue inactive +/** + * ath5k_hw_release_tx_queue() - Set a transmit queue inactive + * @ah: The &struct ath5k_hw + * @queue: One of enum ath5k_tx_queue_id */ -void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) +void +ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) { if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) return; @@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); } -/* +/** + * ath5k_cw_validate() - Make sure the given cw is valid + * @cw_req: The contention window value to check + * * Make sure cw is a power of 2 minus 1 and smaller than 1024 */ -static u16 ath5k_cw_validate(u16 cw_req) +static u16 +ath5k_cw_validate(u16 cw_req) { - u32 cw = 1; cw_req = min(cw_req, (u16)1023); - while (cw < cw_req) - cw = (cw << 1) | 1; + /* Check if cw_req + 1 is a power of 2 */ + if (is_power_of_2(cw_req + 1)) + return cw_req; - return cw; + /* Check if cw_req is a power of 2 */ + if (is_power_of_2(cw_req)) + return cw_req - 1; + + /* If none of the above is correct + * find the closest power of 2 */ + cw_req = (u16) roundup_pow_of_two(cw_req) - 1; + + return cw_req; } -/* - * Get properties for a transmit queue +/** + * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue + * @ah: The &struct ath5k_hw + * @queue: One of enum ath5k_tx_queue_id + * @queue_info: The &struct ath5k_txq_info to fill */ -int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, +int +ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info) { memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); return 0; } -/* - * Set properties for a transmit queue +/** + * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue + * @ah: The &struct ath5k_hw + * @queue: One of enum ath5k_tx_queue_id + * @qinfo: The &struct ath5k_txq_info to use + * + * Returns 0 on success or -EIO if queue is inactive */ -int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, +int +ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *qinfo) { struct ath5k_txq_info *qi; @@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, return 0; } -/* - * Initialize a transmit queue +/** + * ath5k_hw_setup_tx_queue() - Initialize a transmit queue + * @ah: The &struct ath5k_hw + * @queue_type: One of enum ath5k_tx_queue + * @queue_info: The &struct ath5k_txq_info to use + * + * Returns 0 on success, -EINVAL on invalid arguments */ -int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, +int +ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info) { unsigned int queue; @@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum
ath5k_tx_queue queue_type, * Single QCU/DCU initialization * \*******************************/ -/* - * Set tx retry limits on DCU +/** + * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU + * @ah: The &struct ath5k_hw + * @queue: One of enum ath5k_tx_queue_id + * + * This function is used when initializing a queue, to set + * retry limits based on ah->ah_retry_* and the chipset used. */ -void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, +void +ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, unsigned int queue) { /* Single data queue on AR5210 */ @@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, } /** - * ath5k_hw_reset_tx_queue - Initialize a single hw queue + * ath5k_hw_reset_tx_queue() - Initialize a single hw queue + * @ah: The &struct ath5k_hw + * @queue: One of enum ath5k_tx_queue_id * - * @ah The &struct ath5k_hw - * @queue The hw queue number - * - * Set DFS properties for the given transmit queue on DCU + * Set DCF properties for the given transmit queue on DCU * and configures all queue-specific parameters. */ -int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) +int +ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) { struct ath5k_txq_info *tq = &ah->ah_txq[queue]; @@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) \**************************/ /** - * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU - * - * @ah The &struct ath5k_hw - * @slot_time Slot time in us + * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU + * @ah: The &struct ath5k_hw + * @slot_time: Slot time in us * * Sets the global IFS intervals on DCU (also works on AR5210) for * the given slot time and the current bwmode. @@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) } -int ath5k_hw_init_queues(struct ath5k_hw *ah) +/** + * ath5k_hw_init_queues() - Initialize tx queues + * @ah: The &struct ath5k_hw + * + * Initializes all tx queues based on information on + * ah->ah_txq* set by the driver + */ +int +ath5k_hw_init_queues(struct ath5k_hw *ah) { int i, ret; diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h index f5c1000045d..0ea1608b47f 100644 --- a/drivers/net/wireless/ath/ath5k/reg.h +++ b/drivers/net/wireless/ath/ath5k/reg.h @@ -280,6 +280,10 @@ * 5211/5212 we have one primary and 4 secondary registers. * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212. * Most of these bits are common for all chipsets. + * + * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain + * the logical OR from per-queue interrupt bits found on SISR registers + * (see below). 
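In practice the note above shapes interrupt handling on 5211+: the primary status register only says that some queue completed, errored or ran dry, and the per-queue detail then has to be fetched from the secondary (SISR) registers. A rough, hypothetical fragment using the register and bit names defined below:

/* Illustrative only: check the primary status register for any TX event.
 * If this returns true, the per-queue bits live in the SISR registers. */
static bool example_tx_needs_service(struct ath5k_hw *ah)
{
	u32 pisr = ath5k_hw_reg_read(ah, AR5K_PISR);

	return pisr & (AR5K_ISR_TXOK | AR5K_ISR_TXDESC | AR5K_ISR_TXERR |
		       AR5K_ISR_TXEOL | AR5K_ISR_TXURN);
}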
*/ #define AR5K_ISR 0x001c /* Register Address [5210] */ #define AR5K_PISR 0x0080 /* Register Address [5211+] */ @@ -292,7 +296,10 @@ #define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */ #define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */ #define AR5K_ISR_TXERR 0x00000100 /* Transmit error */ -#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */ +#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) + * NOTE: We don't have per-queue info for this + * one, but we can enable it per-queue through + * TXNOFRM_QCU field on TXNOFRM register */ #define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */ #define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */ #define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */ @@ -302,21 +309,29 @@ #define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ #define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */ #define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ -#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ +#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] + * 'or' of MCABT, SSERR, DPERR from SISR2 */ #define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ #define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */ #define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */ #define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */ -#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */ +#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */ #define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */ #define AR5K_ISR_TIM 0x00800000 /* [5211+] */ -#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, - CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ +#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt + * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, + * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ #define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */ #define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */ #define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */ #define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */ +#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\ + AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\ + AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\ + AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\ + AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG) + /* * Secondary status registers [5211+] (0 - 4) * @@ -347,7 +362,7 @@ #define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ #define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ #define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ -#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) 
*/ +#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */ #define AR5K_SISR3 0x0090 /* Register Address [5211+] */ #define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 2abac257b4b..250db40b751 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -19,9 +19,9 @@ * */ -/*****************************\ - Reset functions and helpers -\*****************************/ +/****************************\ + Reset function and helpers +\****************************/ #include <asm/unaligned.h> @@ -33,14 +33,36 @@ #include "debug.h" +/** + * DOC: Reset function and helpers + * + * Here we implement the main reset routine, used to bring the card + * to a working state and ready to receive. We also handle routines + * that don't fit on other places such as clock, sleep and power control + */ + + /******************\ * Helper functions * \******************/ -/* - * Check if a register write has been completed +/** + * ath5k_hw_register_timeout() - Poll a register for a flag/field change + * @ah: The &struct ath5k_hw + * @reg: The register to read + * @flag: The flag/field to check on the register + * @val: The field value we expect (if we check a field) + * @is_set: Instead of checking if the flag got cleared, check if it got set + * + * Some registers contain flags that indicate that an operation is + * running. We use this function to poll these registers and check + * if these flags get cleared. We also use it to poll a register + * field (containing multiple flags) until it gets a specific value. + * + * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0 */ -int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, +int +ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, bool is_set) { int i; @@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, \*************************/ /** - * ath5k_hw_htoclock - Translate usec to hw clock units - * + * ath5k_hw_htoclock() - Translate usec to hw clock units * @ah: The &struct ath5k_hw * @usec: value in microseconds + * + * Translate usecs to hw clock units based on the current + * hw clock rate. + * + * Returns number of clock units */ -unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) +unsigned int +ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) { struct ath_common *common = ath5k_hw_common(ah); return usec * common->clockrate; } /** - * ath5k_hw_clocktoh - Translate hw clock units to usec + * ath5k_hw_clocktoh() - Translate hw clock units to usec + * @ah: The &struct ath5k_hw * @clock: value in hw clock units + * + * Translate hw clock units to usecs based on the current + * hw clock rate. + * + * Returns number of usecs */ -unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) +unsigned int +ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) { struct ath_common *common = ath5k_hw_common(ah); return clock / common->clockrate; } /** - * ath5k_hw_init_core_clock - Initialize core clock - * - * @ah The &struct ath5k_hw + * ath5k_hw_init_core_clock() - Initialize core clock + * @ah: The &struct ath5k_hw * - * Initialize core clock parameters (usec, usec32, latencies etc). + * Initialize core clock parameters (usec, usec32, latencies etc), + * based on current bwmode and chipset properties. 
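The two conversion helpers above are simple scalings by the current core clock rate, so they are easy to use and to reason about; for instance (a sketch, assuming the clock rate has already been set up by the init path):

/* Convert a 9 us slot time to hw clock units. If the core clock were
 * running at 40 MHz this would return 360; feeding the result back
 * through ath5k_hw_clocktoh() gives the original 9 us. */
static unsigned int example_slot_in_clocks(struct ath5k_hw *ah)
{
	return ath5k_hw_htoclock(ah, 9);
}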
*/ -static void ath5k_hw_init_core_clock(struct ath5k_hw *ah) +static void +ath5k_hw_init_core_clock(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; struct ath_common *common = ath5k_hw_common(ah); @@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah) } } -/* +/** + * ath5k_hw_set_sleep_clock() - Set up sleep clock operation + * @ah: The &struct ath5k_hw + * @enable: Enable sleep clock operation (false to disable) + * * If there is an external 32KHz crystal available, use it * as ref. clock instead of 32/40MHz clock and baseband clocks * to save power during sleep or restore normal 32/40MHz * operation. * - * XXX: When operating on 32KHz certain PHY registers (27 - 31, - * 123 - 127) require delay on access. + * NOTE: When operating on 32KHz certain PHY registers (27 - 31, + * 123 - 127) require delay on access. */ -static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) +static void +ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 scal, spending, sclock; @@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) * Reset/Sleep control * \*********************/ -/* - * Reset chipset +/** + * ath5k_hw_nic_reset() - Reset the various chipset units + * @ah: The &struct ath5k_hw + * @val: Mask to indicate what units to reset + * + * To reset the various chipset units we need to write + * the mask to AR5K_RESET_CTL and poll the register until + * all flags are cleared. + * + * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout) */ -static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) +static int +ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) { int ret; u32 mask = val ? val : ~0U; @@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL); /* Wait at least 128 PCI clocks */ - udelay(15); + usleep_range(15, 20); if (ah->ah_version == AR5K_AR5210) { val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA @@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) return ret; } -/* - * Reset AHB chipset - * AR5K_RESET_CTL_PCU flag resets WMAC - * AR5K_RESET_CTL_BASEBAND flag resets WBB +/** + * ath5k_hw_wisoc_reset() - Reset AHB chipset + * @ah: The &struct ath5k_hw + * @flags: Mask to indicate what units to reset + * + * Same as ath5k_hw_nic_reset but for AHB based devices + * + * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout) */ -static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) +static int +ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) { u32 mask = flags ? flags : ~0U; u32 __iomem *reg; @@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) regval = __raw_readl(reg); __raw_writel(regval | val, reg); regval = __raw_readl(reg); - udelay(100); + usleep_range(100, 150); /* Bring BB/MAC out of reset */ __raw_writel(regval & ~val, reg); @@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) return 0; } - -/* - * Sleep control +/** + * ath5k_hw_set_power_mode() - Set power mode + * @ah: The &struct ath5k_hw + * @mode: One of enum ath5k_power_mode + * @set_chip: Set to true to write sleep control register + * @sleep_duration: How much time the device is allowed to sleep + * when sleep logic is enabled (in 128 microsecond increments).
+ * + * This function is used to configure sleep policy and allowed + * sleep modes. For more information check out the sleep control + * register on reg.h and STA_ID1. + * + * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid + * mode is requested. */ -static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, +static int +ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration) { unsigned int i; @@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, AR5K_SLEEP_CTL); - udelay(15); + usleep_range(15, 20); for (i = 200; i > 0; i--) { /* Check if the chip did wake up */ @@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, break; /* Wait a bit and retry */ - udelay(50); + usleep_range(50, 75); ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, AR5K_SLEEP_CTL); } @@ -523,17 +589,20 @@ commit: return 0; } -/* - * Put device on hold +/** + * ath5k_hw_on_hold() - Put device on hold + * @ah: The &struct ath5k_hw * - * Put MAC and Baseband on warm reset and - * keep that state (don't clean sleep control - * register). After this MAC and Baseband are - * disabled and a full reset is needed to come - * back. This way we save as much power as possible + * Put MAC and Baseband on warm reset and keep that state + * (don't clean sleep control register). After this MAC + * and Baseband are disabled and a full reset is needed + * to come back. This way we save as much power as possible * without putting the card on full sleep. + * + * Returns 0 on success or -EIO on error */ -int ath5k_hw_on_hold(struct ath5k_hw *ah) +int +ath5k_hw_on_hold(struct ath5k_hw *ah) { struct pci_dev *pdev = ah->pdev; u32 bus_flags; @@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah) return 0; /* Make sure device is awake */ - ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n"); return ret; @@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah) ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); - mdelay(2); + usleep_range(2000, 2500); } else { ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND | bus_flags); @@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah) } /* ...wakeup again!*/ - ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to put device on hold\n"); return ret; @@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah) return ret; } -/* +/** + * ath5k_hw_nic_wakeup() - Force card out of sleep + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * * Bring up MAC + PHY Chips and program PLL - * Channel is NULL for the initial wakeup. + * NOTE: Channel is NULL for the initial wakeup. 
+ * + * Returns 0 on success, -EIO on hw failure or -EINVAL for false channel infos */ -int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) +int +ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) { struct pci_dev *pdev = ah->pdev; u32 turbo, mode, clock, bus_flags; @@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) { /* Wakeup the device */ - ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n"); return ret; @@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); - mdelay(2); + usleep_range(2000, 2500); } else { if (ath5k_get_bus_type(ah) == ATH_AHB) ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU | @@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) } /* ...wakeup again!...*/ - ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to resume the MAC Chip\n"); return ret; @@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) /* ...update PLL if needed */ if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) { ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL); - udelay(300); + usleep_range(300, 350); } /* ...set the PHY operating mode */ @@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) * Post-initvals register modifications * \**************************************/ -/* TODO: Half/Quarter rate */ -static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, +/** + * ath5k_hw_tweak_initval_settings() - Tweak initial settings + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * Some settings are not handled on initvals, e.g. bwmode + * settings, some phy settings, workarounds etc that in general + * don't fit anywhere else or are too small to introduce a separate + * function for each one. So we have this function to handle + * them all during reset and complete card's initialization. + */ +static void +ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, struct ieee80211_channel *channel) { if (ah->ah_version == AR5K_AR5212 && @@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, } } -static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, +/** + * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM + * @ah: The &struct ath5k_hw + * @channel: The &struct ieee80211_channel + * + * Use settings stored on EEPROM to properly initialize the card + * based on various infos and per-mode calibration data. 
+ */ +static void +ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, struct ieee80211_channel *channel) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; @@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, * Main reset function * \*********************/ -int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, +/** + * ath5k_hw_reset() - The main reset function + * @ah: The &struct ath5k_hw + * @op_mode: One of enum nl80211_iftype + * @channel: The &struct ieee80211_channel + * @fast: Enable fast channel switching + * @skip_pcu: Skip pcu initialization + * + * This is the function we call each time we want to (re)initialize the + * card and pass new settings to hw. We also call it when hw runs into + * trouble to make it come back to a working state. + * + * Returns 0 on success, -EINVAL on false op_mode or channel infos, or -EIO + * on failure. + */ +int +ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool fast, bool skip_pcu) { u32 s_seq[10], s_led[3], tsf_up, tsf_lo; @@ -1047,7 +1159,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, */ if (fast && (ah->ah_radio != AR5K_RF2413) && (ah->ah_radio != AR5K_RF5413)) - fast = 0; + fast = false; /* Disable sleep clock operation * to avoid register access delay on certain @@ -1073,7 +1185,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, if (ret && fast) { ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "DMA didn't stop, falling back to normal reset\n"); - fast = 0; + fast = false; /* Non fatal, just continue with * normal reset */ ret = 0; @@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, /* * Initialize PCU */ - ath5k_hw_pcu_init(ah, op_mode, mode); + ath5k_hw_pcu_init(ah, op_mode); /* * Initialize PHY diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h index 5d11c23b429..aed34d9954c 100644 --- a/drivers/net/wireless/ath/ath5k/rfbuffer.h +++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h @@ -18,7 +18,9 @@ */ -/* +/** + * DOC: RF Buffer registers + * * There are some special registers on the RF chip * that control various operation settings related mostly to * the analog parts (channel, gain adjustment etc). @@ -44,40 +46,63 @@ */ -/* +/** + * struct ath5k_ini_rfbuffer - Initial RF Buffer settings + * @rfb_bank: RF Bank number + * @rfb_ctrl_register: RF Buffer control register + * @rfb_mode_data: RF Buffer data for each mode + * * Struct to hold default mode specific RF - * register values (RF Banks) + * register values (RF Banks) for each chip. 
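Given the three fields of struct ath5k_ini_rfbuffer shown just below (bank, control register, one data word per mode), an entry of such an initval table would be declared roughly as follows; every number here is a placeholder, not a value taken from any real chip:

/* Hypothetical RF buffer entries, for illustration only */
static const struct ath5k_ini_rfbuffer example_rfb[] = {
	/* bank, ctrl register, { per-mode data words } */
	{ 0, 0x0020, { 0x00000000, 0x00000000, 0x00000000 } },
	{ 1, 0x0024, { 0x00000010, 0x00000010, 0x00000010 } },
};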
*/ struct ath5k_ini_rfbuffer { - u8 rfb_bank; /* RF Bank number */ - u16 rfb_ctrl_register; /* RF Buffer control register */ - u32 rfb_mode_data[3]; /* RF Buffer data for each mode */ + u8 rfb_bank; + u16 rfb_ctrl_register; + u32 rfb_mode_data[3]; }; -/* +/** + * struct ath5k_rfb_field - An RF Buffer field (register/value) + * @len: Field length + * @pos: Offset on the raw packet + * @col: Used for shifting + * * Struct to hold RF Buffer field * infos used to access certain RF * analog registers */ struct ath5k_rfb_field { - u8 len; /* Field length */ - u16 pos; /* Offset on the raw packet */ - u8 col; /* Column -used for shifting */ + u8 len; + u16 pos; + u8 col; }; -/* - * RF analog register definition +/** + * struct ath5k_rf_reg - RF analog register definition + * @bank: RF Buffer Bank number + * @index: Register's index on ath5k_rf_regs_idx + * @field: The &struct ath5k_rfb_field + * + * We use this struct to define the set of RF registers + * on each chip that we want to tweak. Some RF registers + * are common between different chip versions so this saves + * us space and complexity because we can refer to an rf + * register by its index no matter what chip we work with + * as long as it has that register. */ struct ath5k_rf_reg { - u8 bank; /* RF Buffer Bank number */ - u8 index; /* Register's index on rf_regs_idx */ - struct ath5k_rfb_field field; /* RF Buffer field for this register */ + u8 bank; + u8 index; + struct ath5k_rfb_field field; }; -/* Map RF registers to indexes +/** + * enum ath5k_rf_regs_idx - Map RF registers to indexes + * * We do this to handle common bits and make our * life easier by using an index for each register - * instead of a full rfb_field */ + * instead of a full rfb_field + */ enum ath5k_rf_regs_idx { /* BANK 2 */ AR5K_RF_TURBO = 0, diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h index ebfae052d89..4d21df0e597 100644 --- a/drivers/net/wireless/ath/ath5k/rfgain.h +++ b/drivers/net/wireless/ath/ath5k/rfgain.h @@ -18,13 +18,17 @@ * */ -/* +/** + * struct ath5k_ini_rfgain - RF Gain table + * @rfg_register: RF Gain register address + * @rfg_value: Register value for 5 and 2GHz + * * Mode-specific RF Gain table (64bytes) for RF5111/5112 * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial * RF Gain values are included in AR5K_AR5210_INI) */ struct ath5k_ini_rfgain { - u16 rfg_register; /* RF Gain register address */ + u16 rfg_register; u32 rfg_value[2]; /* [freq (see below)] */ }; @@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = { #define AR5K_GAIN_CHECK_ADJUST(_g) \ ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high) +/** + * struct ath5k_gain_opt_step - An RF gain optimization step + * @gos_param: Set of parameters + * @gos_gain: Gain + */ struct ath5k_gain_opt_step { s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS]; s8 gos_gain; }; +/** + * struct ath5k_gain_opt - RF Gain optimization ladder + * @go_default: The default step + * @go_steps_count: How many optimization steps + * @go_step: Array of &struct ath5k_gain_opt_step + */ struct ath5k_gain_opt { u8 go_default; u8 go_steps_count; const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT]; }; + /* + * RF5111 * Parameters on gos_param: * 1) Tx clip PHY register * 2) PWD 90 RF register @@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = { }; /* + * RF5112 * Parameters on gos_param: * 1) Mixgain ovr RF register * 2) PWD 138 RF register diff --git
a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h index 39f002ed4a8..00f01581934 100644 --- a/drivers/net/wireless/ath/ath5k/trace.h +++ b/drivers/net/wireless/ath/ath5k/trace.h @@ -3,7 +3,8 @@ #include <linux/tracepoint.h> -#ifndef CONFIG_ATH5K_TRACER + +#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__) #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) \ static inline void trace_ ## name(proto) {} @@ -93,7 +94,7 @@ TRACE_EVENT(ath5k_tx_complete, #endif /* __TRACE_ATH5K_H */ -#ifdef CONFIG_ATH5K_TRACER +#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__) #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile index 8f7a0d1c290..70706930355 100644 --- a/drivers/net/wireless/ath/ath6kl/Makefile +++ b/drivers/net/wireless/ath/ath6kl/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_ATH6KL) := ath6kl.o ath6kl-y += debug.o -ath6kl-y += htc_hif.o +ath6kl-y += hif.o ath6kl-y += htc.o ath6kl-y += bmi.o ath6kl-y += cfg80211.o diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c index c5d11cc536e..bce3575c310 100644 --- a/drivers/net/wireless/ath/ath6kl/bmi.c +++ b/drivers/net/wireless/ath/ath6kl/bmi.c @@ -19,165 +19,6 @@ #include "target.h" #include "debug.h" -static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar) -{ - u32 addr; - unsigned long timeout; - int ret; - - ar->bmi.cmd_credits = 0; - - /* Read the counter register to get the command credits */ - addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; - - timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); - while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { - - /* - * Hit the credit counter with a 4-byte access, the first byte - * read will hit the counter and cause a decrement, while the - * remaining 3 bytes has no effect. The rationale behind this - * is to make all HIF accesses 4-byte aligned. - */ - ret = hif_read_write_sync(ar, addr, - (u8 *)&ar->bmi.cmd_credits, 4, - HIF_RD_SYNC_BYTE_INC); - if (ret) { - ath6kl_err("Unable to decrement the command credit count register: %d\n", - ret); - return ret; - } - - /* The counter is only 8 bits. 
- * Ignore anything in the upper 3 bytes - */ - ar->bmi.cmd_credits &= 0xFF; - } - - if (!ar->bmi.cmd_credits) { - ath6kl_err("bmi communication timeout\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) -{ - unsigned long timeout; - u32 rx_word = 0; - int ret = 0; - - timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); - while (time_before(jiffies, timeout) && !rx_word) { - ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS, - (u8 *)&rx_word, sizeof(rx_word), - HIF_RD_SYNC_BYTE_INC); - if (ret) { - ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); - return ret; - } - - /* all we really want is one bit */ - rx_word &= (1 << ENDPOINT1); - } - - if (!rx_word) { - ath6kl_err("bmi_recv_buf FIFO empty\n"); - return -EINVAL; - } - - return ret; -} - -static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len) -{ - int ret; - u32 addr; - - ret = ath6kl_get_bmi_cmd_credits(ar); - if (ret) - return ret; - - addr = ar->mbox_info.htc_addr; - - ret = hif_read_write_sync(ar, addr, buf, len, - HIF_WR_SYNC_BYTE_INC); - if (ret) - ath6kl_err("unable to send the bmi data to the device\n"); - - return ret; -} - -static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len) -{ - int ret; - u32 addr; - - /* - * During normal bootup, small reads may be required. - * Rather than issue an HIF Read and then wait as the Target - * adds successive bytes to the FIFO, we wait here until - * we know that response data is available. - * - * This allows us to cleanly timeout on an unexpected - * Target failure rather than risk problems at the HIF level. - * In particular, this avoids SDIO timeouts and possibly garbage - * data on some host controllers. And on an interconnect - * such as Compact Flash (as well as some SDIO masters) which - * does not provide any indication on data timeout, it avoids - * a potential hang or garbage response. - * - * Synchronization is more difficult for reads larger than the - * size of the MBOX FIFO (128B), because the Target is unable - * to push the 129th byte of data until AFTER the Host posts an - * HIF Read and removes some FIFO data. So for large reads the - * Host proceeds to post an HIF Read BEFORE all the data is - * actually available to read. Fortunately, large BMI reads do - * not occur in practice -- they're supported for debug/development. - * - * So Host/Target BMI synchronization is divided into these cases: - * CASE 1: length < 4 - * Should not happen - * - * CASE 2: 4 <= length <= 128 - * Wait for first 4 bytes to be in FIFO - * If CONSERVATIVE_BMI_READ is enabled, also wait for - * a BMI command credit, which indicates that the ENTIRE - * response is available in the the FIFO - * - * CASE 3: length > 128 - * Wait for the first 4 bytes to be in FIFO - * - * For most uses, a small timeout should be sufficient and we will - * usually see a response quickly; but there may be some unusual - * (debug) cases of BMI_EXECUTE where we want an larger timeout. - * For now, we use an unbounded busy loop while waiting for - * BMI_EXECUTE. - * - * If BMI_EXECUTE ever needs to support longer-latency execution, - * especially in production, this code needs to be enhanced to sleep - * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently - * a function of Host processor speed. 
- */ - if (len >= 4) { /* NB: Currently, always true */ - ret = ath6kl_bmi_get_rx_lkahd(ar); - if (ret) - return ret; - } - - addr = ar->mbox_info.htc_addr; - ret = hif_read_write_sync(ar, addr, buf, len, - HIF_RD_SYNC_BYTE_INC); - if (ret) { - ath6kl_err("Unable to read the bmi data from the device: %d\n", - ret); - return ret; - } - - return 0; -} - int ath6kl_bmi_done(struct ath6kl *ar) { int ret; @@ -190,14 +31,12 @@ int ath6kl_bmi_done(struct ath6kl *ar) ar->bmi.done_sent = true; - ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid)); + ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); if (ret) { ath6kl_err("Unable to send bmi done: %d\n", ret); return ret; } - ath6kl_bmi_cleanup(ar); - return 0; } @@ -212,13 +51,13 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, return -EACCES; } - ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid)); + ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); if (ret) { ath6kl_err("Unable to send get target info: %d\n", ret); return ret; } - ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version, + ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version, sizeof(targ_info->version)); if (ret) { ath6kl_err("Unable to recv target info: %d\n", ret); @@ -227,7 +66,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) { /* Determine how many bytes are in the Target's targ_info */ - ret = ath6kl_bmi_recv_buf(ar, + ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->byte_count, sizeof(targ_info->byte_count)); if (ret) { @@ -246,7 +85,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, } /* Read the remainder of the targ_info */ - ret = ath6kl_bmi_recv_buf(ar, + ret = ath6kl_hif_bmi_read(ar, ((u8 *)targ_info) + sizeof(targ_info->byte_count), sizeof(*targ_info) - @@ -278,8 +117,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) return -EACCES; } - size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len); - if (size > MAX_BMI_CMDBUF_SZ) { + size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len); + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -292,8 +131,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) len_remain = len; while (len_remain) { - rx_len = (len_remain < BMI_DATASZ_MAX) ? - len_remain : BMI_DATASZ_MAX; + rx_len = (len_remain < ar->bmi.max_data_size) ? 
+ len_remain : ar->bmi.max_data_size; offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); @@ -302,13 +141,13 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len)); offset += sizeof(len); - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } - ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len); + ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); @@ -328,7 +167,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) u32 offset; u32 len_remain, tx_len; const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len); - u8 aligned_buf[BMI_DATASZ_MAX]; + u8 aligned_buf[400]; u8 *src; if (ar->bmi.done_sent) { @@ -336,12 +175,15 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) return -EACCES; } - if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) { + if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } - memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header); + if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf))) + return -E2BIG; + + memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi write memory: addr: 0x%x, len: %d\n", addr, len); @@ -350,7 +192,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) while (len_remain) { src = &buf[len - len_remain]; - if (len_remain < (BMI_DATASZ_MAX - header)) { + if (len_remain < (ar->bmi.max_data_size - header)) { if (len_remain & 3) { /* align it with 4 bytes */ len_remain = len_remain + @@ -360,7 +202,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) } tx_len = len_remain; } else { - tx_len = (BMI_DATASZ_MAX - header); + tx_len = (ar->bmi.max_data_size - header); } offset = 0; @@ -373,7 +215,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len); offset += tx_len; - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); @@ -398,7 +240,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) } size = sizeof(cid) + sizeof(addr) + sizeof(param); - if (size > MAX_BMI_CMDBUF_SZ) { + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -415,13 +257,13 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param)); offset += sizeof(*param); - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } - ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param)); + ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; @@ -445,7 +287,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) } size = sizeof(cid) + sizeof(addr); - if (size > MAX_BMI_CMDBUF_SZ) { + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -459,7 +301,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += 
sizeof(addr); - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; @@ -481,7 +323,7 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) } size = sizeof(cid) + sizeof(addr); - if (size > MAX_BMI_CMDBUF_SZ) { + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -495,13 +337,13 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } - ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param)); + ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; @@ -524,7 +366,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) } size = sizeof(cid) + sizeof(addr) + sizeof(param); - if (size > MAX_BMI_CMDBUF_SZ) { + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -542,7 +384,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) memcpy(&(ar->bmi.cmd_buf[offset]), ¶m, sizeof(param)); offset += sizeof(param); - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; @@ -565,8 +407,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) return -EACCES; } - size = BMI_DATASZ_MAX + header; - if (size > MAX_BMI_CMDBUF_SZ) { + size = ar->bmi.max_data_size + header; + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -577,8 +419,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) len_remain = len; while (len_remain) { - tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ? - len_remain : (BMI_DATASZ_MAX - header); + tx_len = (len_remain < (ar->bmi.max_data_size - header)) ? 
+ len_remain : (ar->bmi.max_data_size - header); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); @@ -589,7 +431,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) tx_len); offset += tx_len; - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); @@ -615,7 +457,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) } size = sizeof(cid) + sizeof(addr); - if (size > MAX_BMI_CMDBUF_SZ) { + if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } @@ -631,7 +473,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); - ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to start LZ stream to the device: %d\n", ret); @@ -672,10 +514,20 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) return ret; } +void ath6kl_bmi_reset(struct ath6kl *ar) +{ + ar->bmi.done_sent = false; +} + int ath6kl_bmi_init(struct ath6kl *ar) { - ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC); + if (WARN_ON(ar->bmi.max_data_size == 0)) + return -EINVAL; + + /* cmd + addr + len + data_size */ + ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3); + ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC); if (!ar->bmi.cmd_buf) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h index 96851d5df24..f1ca6812456 100644 --- a/drivers/net/wireless/ath/ath6kl/bmi.h +++ b/drivers/net/wireless/ath/ath6kl/bmi.h @@ -44,12 +44,6 @@ * BMI handles all required Target-side cache flushing. 
*/ -#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \ - (sizeof(u32) * 3 /* cmd + addr + len */)) - -/* Maximum data size used for BMI transfers */ -#define BMI_DATASZ_MAX 256 - /* BMI Commands */ #define BMI_NO_COMMAND 0 @@ -230,6 +224,8 @@ struct ath6kl_bmi_target_info { int ath6kl_bmi_init(struct ath6kl *ar); void ath6kl_bmi_cleanup(struct ath6kl *ar); +void ath6kl_bmi_reset(struct ath6kl *ar); + int ath6kl_bmi_done(struct ath6kl *ar); int ath6kl_bmi_get_target_info(struct ath6kl *ar, struct ath6kl_bmi_target_info *targ_info); diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index f517eb8f7b4..6c59a217b1a 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -123,17 +123,50 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = { .bitrates = ath6kl_a_rates, }; -static int ath6kl_set_wpa_version(struct ath6kl *ar, +#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */ + +/* returns true if scheduled scan was stopped */ +static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + + if (ar->state != ATH6KL_STATE_SCHED_SCAN) + return false; + + del_timer_sync(&vif->sched_scan_timer); + + ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_AWAKE); + + ar->state = ATH6KL_STATE_ON; + + return true; +} + +static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + bool stopped; + + stopped = __ath6kl_cfg80211_sscan_stop(vif); + + if (!stopped) + return; + + cfg80211_sched_scan_stopped(ar->wiphy); +} + +static int ath6kl_set_wpa_version(struct ath6kl_vif *vif, enum nl80211_wpa_versions wpa_version) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version); if (!wpa_version) { - ar->auth_mode = NONE_AUTH; + vif->auth_mode = NONE_AUTH; } else if (wpa_version & NL80211_WPA_VERSION_2) { - ar->auth_mode = WPA2_AUTH; + vif->auth_mode = WPA2_AUTH; } else if (wpa_version & NL80211_WPA_VERSION_1) { - ar->auth_mode = WPA_AUTH; + vif->auth_mode = WPA_AUTH; } else { ath6kl_err("%s: %u not supported\n", __func__, wpa_version); return -ENOTSUPP; @@ -142,25 +175,24 @@ static int ath6kl_set_wpa_version(struct ath6kl *ar, return 0; } -static int ath6kl_set_auth_type(struct ath6kl *ar, +static int ath6kl_set_auth_type(struct ath6kl_vif *vif, enum nl80211_auth_type auth_type) { - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type); switch (auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: - ar->dot11_auth_mode = OPEN_AUTH; + vif->dot11_auth_mode = OPEN_AUTH; break; case NL80211_AUTHTYPE_SHARED_KEY: - ar->dot11_auth_mode = SHARED_AUTH; + vif->dot11_auth_mode = SHARED_AUTH; break; case NL80211_AUTHTYPE_NETWORK_EAP: - ar->dot11_auth_mode = LEAP_AUTH; + vif->dot11_auth_mode = LEAP_AUTH; break; case NL80211_AUTHTYPE_AUTOMATIC: - ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; + vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; break; default: @@ -171,11 +203,11 @@ static int ath6kl_set_auth_type(struct ath6kl *ar, return 0; } -static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast) +static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast) { - u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto; - u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len : - &ar->grp_crypto_len; + u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto; + u8 *ar_cipher_len = ucast ? 
&vif->prwise_crypto_len : + &vif->grp_crypto_len; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n", __func__, cipher, ucast); @@ -202,6 +234,10 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast) *ar_cipher = AES_CRYPT; *ar_cipher_len = 0; break; + case WLAN_CIPHER_SUITE_SMS4: + *ar_cipher = WAPI_CRYPT; + *ar_cipher_len = 0; + break; default: ath6kl_err("cipher 0x%x not supported\n", cipher); return -ENOTSUPP; @@ -210,28 +246,35 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast) return 0; } -static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt) +static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt); if (key_mgmt == WLAN_AKM_SUITE_PSK) { - if (ar->auth_mode == WPA_AUTH) - ar->auth_mode = WPA_PSK_AUTH; - else if (ar->auth_mode == WPA2_AUTH) - ar->auth_mode = WPA2_PSK_AUTH; + if (vif->auth_mode == WPA_AUTH) + vif->auth_mode = WPA_PSK_AUTH; + else if (vif->auth_mode == WPA2_AUTH) + vif->auth_mode = WPA2_PSK_AUTH; + } else if (key_mgmt == 0x00409600) { + if (vif->auth_mode == WPA_AUTH) + vif->auth_mode = WPA_AUTH_CCKM; + else if (vif->auth_mode == WPA2_AUTH) + vif->auth_mode = WPA2_AUTH_CCKM; } else if (key_mgmt != WLAN_AKM_SUITE_8021X) { - ar->auth_mode = NONE_AUTH; + vif->auth_mode = NONE_AUTH; } } -static bool ath6kl_cfg80211_ready(struct ath6kl *ar) +static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif) { + struct ath6kl *ar = vif->ar; + if (!test_bit(WMI_READY, &ar->flag)) { ath6kl_err("wmi is not ready\n"); return false; } - if (!test_bit(WLAN_ENABLED, &ar->flag)) { + if (!test_bit(WLAN_ENABLED, &vif->flags)) { ath6kl_err("wlan disabled\n"); return false; } @@ -239,15 +282,146 @@ static bool ath6kl_cfg80211_ready(struct ath6kl *ar) return true; } +static bool ath6kl_is_wpa_ie(const u8 *pos) +{ + return pos[0] == WLAN_EID_WPA && pos[1] >= 4 && + pos[2] == 0x00 && pos[3] == 0x50 && + pos[4] == 0xf2 && pos[5] == 0x01; +} + +static bool ath6kl_is_rsn_ie(const u8 *pos) +{ + return pos[0] == WLAN_EID_RSN; +} + +static bool ath6kl_is_wps_ie(const u8 *pos) +{ + return (pos[0] == WLAN_EID_VENDOR_SPECIFIC && + pos[1] >= 4 && + pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 && + pos[5] == 0x04); +} + +static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies, + size_t ies_len) +{ + struct ath6kl *ar = vif->ar; + const u8 *pos; + u8 *buf = NULL; + size_t len = 0; + int ret; + + /* + * Clear previously set flag + */ + + ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; + + /* + * Filter out RSN/WPA IE(s) + */ + + if (ies && ies_len) { + buf = kmalloc(ies_len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + pos = ies; + + while (pos + 1 < ies + ies_len) { + if (pos + 2 + pos[1] > ies + ies_len) + break; + if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) { + memcpy(buf + len, pos, 2 + pos[1]); + len += 2 + pos[1]; + } + + if (ath6kl_is_wps_ie(pos)) + ar->connect_ctrl_flags |= CONNECT_WPS_FLAG; + + pos += 2 + pos[1]; + } + } + + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_ASSOC_REQ, buf, len); + kfree(buf); + return ret; +} + +static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type) +{ + switch (type) { + case NL80211_IFTYPE_STATION: + *nw_type = INFRA_NETWORK; + break; + case NL80211_IFTYPE_ADHOC: + *nw_type = ADHOC_NETWORK; + break; + case NL80211_IFTYPE_AP: + *nw_type = AP_NETWORK; + break; + case NL80211_IFTYPE_P2P_CLIENT: + *nw_type = INFRA_NETWORK; + break; + case 
NL80211_IFTYPE_P2P_GO: + *nw_type = AP_NETWORK; + break; + default: + ath6kl_err("invalid interface type %u\n", type); + return -ENOTSUPP; + } + + return 0; +} + +static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type, + u8 *if_idx, u8 *nw_type) +{ + int i; + + if (ath6kl_nliftype_to_drv_iftype(type, nw_type)) + return false; + + if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) && + ar->num_vif)) + return false; + + if (type == NL80211_IFTYPE_STATION || + type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) { + for (i = 0; i < ar->vif_max; i++) { + if ((ar->avail_idx_map >> i) & BIT(0)) { + *if_idx = i; + return true; + } + } + } + + if (type == NL80211_IFTYPE_P2P_CLIENT || + type == NL80211_IFTYPE_P2P_GO) { + for (i = ar->max_norm_iface; i < ar->vif_max; i++) { + if ((ar->avail_idx_map >> i) & BIT(0)) { + *if_idx = i; + return true; + } + } + } + + return false; +} + static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); int status; + u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE; - ar->sme_state = SME_CONNECTING; + ath6kl_cfg80211_sscan_disable(vif); - if (!ath6kl_cfg80211_ready(ar)) + vif->sme_state = SME_CONNECTING; + + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { @@ -287,12 +461,22 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } } - if (test_bit(CONNECTED, &ar->flag) && - ar->ssid_len == sme->ssid_len && - !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { - ar->reconnect_flag = true; - status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid, - ar->ch_hint); + if (sme->ie && (sme->ie_len > 0)) { + status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len); + if (status) { + up(&ar->sem); + return status; + } + } else + ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; + + if (test_bit(CONNECTED, &vif->flags) && + vif->ssid_len == sme->ssid_len && + !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) { + vif->reconnect_flag = true; + status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx, + vif->req_bssid, + vif->ch_hint); up(&ar->sem); if (status) { @@ -300,42 +484,43 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, return -EIO; } return 0; - } else if (ar->ssid_len == sme->ssid_len && - !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { - ath6kl_disconnect(ar); + } else if (vif->ssid_len == sme->ssid_len && + !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) { + ath6kl_disconnect(vif); } - memset(ar->ssid, 0, sizeof(ar->ssid)); - ar->ssid_len = sme->ssid_len; - memcpy(ar->ssid, sme->ssid, sme->ssid_len); + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = sme->ssid_len; + memcpy(vif->ssid, sme->ssid, sme->ssid_len); if (sme->channel) - ar->ch_hint = sme->channel->center_freq; + vif->ch_hint = sme->channel->center_freq; - memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); if (sme->bssid && !is_broadcast_ether_addr(sme->bssid)) - memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid)); + memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid)); - ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions); + ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions); - status = ath6kl_set_auth_type(ar, sme->auth_type); + status = ath6kl_set_auth_type(vif, sme->auth_type); if (status) { up(&ar->sem); return status; } if 
(sme->crypto.n_ciphers_pairwise) - ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true); + ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true); else - ath6kl_set_cipher(ar, 0, true); + ath6kl_set_cipher(vif, 0, true); - ath6kl_set_cipher(ar, sme->crypto.cipher_group, false); + ath6kl_set_cipher(vif, sme->crypto.cipher_group, false); if (sme->crypto.n_akm_suites) - ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]); + ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]); if ((sme->key_len) && - (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) { + (vif->auth_mode == NONE_AUTH) && + (vif->prwise_crypto == WEP_CRYPT)) { struct ath6kl_key *key = NULL; if (sme->key_idx < WMI_MIN_KEY_INDEX || @@ -346,56 +531,60 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, return -ENOENT; } - key = &ar->keys[sme->key_idx]; + key = &vif->keys[sme->key_idx]; key->key_len = sme->key_len; memcpy(key->key, sme->key, key->key_len); - key->cipher = ar->prwise_crypto; - ar->def_txkey_index = sme->key_idx; + key->cipher = vif->prwise_crypto; + vif->def_txkey_index = sme->key_idx; - ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx, - ar->prwise_crypto, + ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx, + vif->prwise_crypto, GROUP_USAGE | TX_USAGE, key->key_len, - NULL, + NULL, 0, key->key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } if (!ar->usr_bss_filter) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); - if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + ALL_BSS_FILTER, 0) != 0) { ath6kl_err("couldn't set bss filtering\n"); up(&ar->sem); return -EIO; } } - ar->nw_type = ar->next_mode; + vif->nw_type = vif->next_mode; + + if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + nw_subtype = SUBTYPE_P2PCLIENT; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: connect called with authmode %d dot11 auth %d" " PW crypto %d PW crypto len %d GRP crypto %d" " GRP crypto len %d channel hint %u\n", __func__, - ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, - ar->prwise_crypto_len, ar->grp_crypto, - ar->grp_crypto_len, ar->ch_hint); - - ar->reconnect_flag = 0; - status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, - ar->dot11_auth_mode, ar->auth_mode, - ar->prwise_crypto, - ar->prwise_crypto_len, - ar->grp_crypto, ar->grp_crypto_len, - ar->ssid_len, ar->ssid, - ar->req_bssid, ar->ch_hint, - ar->connect_ctrl_flags); + vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto, + vif->prwise_crypto_len, vif->grp_crypto, + vif->grp_crypto_len, vif->ch_hint); + + vif->reconnect_flag = 0; + status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, + vif->dot11_auth_mode, vif->auth_mode, + vif->prwise_crypto, + vif->prwise_crypto_len, + vif->grp_crypto, vif->grp_crypto_len, + vif->ssid_len, vif->ssid, + vif->req_bssid, vif->ch_hint, + ar->connect_ctrl_flags, nw_subtype); up(&ar->sem); if (status == -EINVAL) { - memset(ar->ssid, 0, sizeof(ar->ssid)); - ar->ssid_len = 0; + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = 0; ath6kl_err("invalid request\n"); return -ENOENT; } else if (status) { @@ -404,28 +593,40 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) && - ((ar->auth_mode == WPA_PSK_AUTH) - || (ar->auth_mode == WPA2_PSK_AUTH))) { - mod_timer(&ar->disconnect_timer, + ((vif->auth_mode == WPA_PSK_AUTH) + || (vif->auth_mode 
== WPA2_PSK_AUTH))) { + mod_timer(&vif->disconnect_timer, jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL)); } ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD; - set_bit(CONNECT_PEND, &ar->flag); + set_bit(CONNECT_PEND, &vif->flags); return 0; } -static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid, +static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, + enum network_type nw_type, + const u8 *bssid, struct ieee80211_channel *chan, const u8 *beacon_ie, size_t beacon_ie_len) { + struct ath6kl *ar = vif->ar; struct cfg80211_bss *bss; + u16 cap_mask, cap_val; u8 *ie; - bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid, - ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS, - WLAN_CAPABILITY_ESS); + if (nw_type & ADHOC_NETWORK) { + cap_mask = WLAN_CAPABILITY_IBSS; + cap_val = WLAN_CAPABILITY_IBSS; + } else { + cap_mask = WLAN_CAPABILITY_ESS; + cap_val = WLAN_CAPABILITY_ESS; + } + + bss = cfg80211_get_bss(ar->wiphy, chan, bssid, + vif->ssid, vif->ssid_len, + cap_mask, cap_val); if (bss == NULL) { /* * Since cfg80211 may not yet know about the BSS, @@ -435,21 +636,20 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid, * Prepend SSID element since it is not included in the Beacon * IEs from the target. */ - ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL); + ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL); if (ie == NULL) return -ENOMEM; ie[0] = WLAN_EID_SSID; - ie[1] = ar->ssid_len; - memcpy(ie + 2, ar->ssid, ar->ssid_len); - memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len); - bss = cfg80211_inform_bss(ar->wdev->wiphy, chan, - bssid, 0, WLAN_CAPABILITY_ESS, 100, - ie, 2 + ar->ssid_len + beacon_ie_len, + ie[1] = vif->ssid_len; + memcpy(ie + 2, vif->ssid, vif->ssid_len); + memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len); + bss = cfg80211_inform_bss(ar->wiphy, chan, + bssid, 0, cap_val, 100, + ie, 2 + vif->ssid_len + beacon_ie_len, 0, GFP_KERNEL); if (bss) - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for " - "%pM prior to indicating connect/roamed " - "event\n", bssid); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to " + "cfg80211\n", bssid); kfree(ie); } else ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss " @@ -463,7 +663,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid, return 0; } -void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, +void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, u16 listen_intvl, u16 beacon_intvl, enum network_type nw_type, @@ -471,6 +671,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, u8 assoc_resp_len, u8 *assoc_info) { struct ieee80211_channel *chan; + struct ath6kl *ar = vif->ar; /* capinfo + listen interval */ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16); @@ -489,11 +690,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, * Store Beacon interval here; DTIM period will be available only once * a Beacon frame from the AP is seen. 
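When the BSS is not yet in the cfg80211 cache, the code above fabricates an entry by prepending an SSID element to the beacon IEs reported by the target, which omit the SSID. A compact sketch of that buffer layout, assuming nothing beyond what the hunk shows; for example an 8-byte SSID produces a 0x00 0x08 header, the SSID bytes, then the beacon IEs:

    #include <stdlib.h>
    #include <string.h>

    /* Build the IE buffer handed to cfg80211_inform_bss(): SSID element
     * (tag 0 == WLAN_EID_SSID) first, firmware beacon IEs after it. */
    static unsigned char *build_bss_ies(const unsigned char *ssid,
    				    unsigned char ssid_len,
    				    const unsigned char *beacon_ie,
    				    size_t beacon_ie_len, size_t *out_len)
    {
    	unsigned char *ie = malloc(2 + ssid_len + beacon_ie_len);

    	if (!ie)
    		return NULL;
    	ie[0] = 0;			/* WLAN_EID_SSID */
    	ie[1] = ssid_len;
    	memcpy(ie + 2, ssid, ssid_len);
    	memcpy(ie + 2 + ssid_len, beacon_ie, beacon_ie_len);
    	*out_len = 2 + ssid_len + beacon_ie_len;
    	return ie;
    }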
*/ - ar->assoc_bss_beacon_int = beacon_intvl; - clear_bit(DTIM_PERIOD_AVAIL, &ar->flag); + vif->assoc_bss_beacon_int = beacon_intvl; + clear_bit(DTIM_PERIOD_AVAIL, &vif->flags); if (nw_type & ADHOC_NETWORK) { - if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { + if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in ibss mode\n", __func__); return; @@ -501,39 +702,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, } if (nw_type & INFRA_NETWORK) { - if (ar->wdev->iftype != NL80211_IFTYPE_STATION && - ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { + if (vif->wdev.iftype != NL80211_IFTYPE_STATION && + vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in station mode\n", __func__); return; } } - chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel); - + chan = ieee80211_get_channel(ar->wiphy, (int) channel); - if (nw_type & ADHOC_NETWORK) { - cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); + if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info, + beacon_ie_len) < 0) { + ath6kl_err("could not add cfg80211 bss entry\n"); return; } - if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info, - beacon_ie_len) < 0) { - ath6kl_err("could not add cfg80211 bss entry for " - "connect/roamed notification\n"); + if (nw_type & ADHOC_NETWORK) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n", + nw_type & ADHOC_CREATOR ? "creator" : "joiner"); + cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL); return; } - if (ar->sme_state == SME_CONNECTING) { + if (vif->sme_state == SME_CONNECTING) { /* inform connect result to cfg80211 */ - ar->sme_state = SME_CONNECTED; - cfg80211_connect_result(ar->net_dev, bssid, + vif->sme_state = SME_CONNECTED; + cfg80211_connect_result(vif->ndev, bssid, assoc_req_ie, assoc_req_len, assoc_resp_ie, assoc_resp_len, WLAN_STATUS_SUCCESS, GFP_KERNEL); - } else if (ar->sme_state == SME_CONNECTED) { + } else if (vif->sme_state == SME_CONNECTED) { /* inform roam event to cfg80211 */ - cfg80211_roamed(ar->net_dev, chan, bssid, + cfg80211_roamed(vif->ndev, chan, bssid, assoc_req_ie, assoc_req_len, assoc_resp_ie, assoc_resp_len, GFP_KERNEL); } @@ -542,12 +743,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__, reason_code); - if (!ath6kl_cfg80211_ready(ar)) + ath6kl_cfg80211_sscan_disable(vif); + + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { @@ -560,44 +764,46 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, return -ERESTARTSYS; } - ar->reconnect_flag = 0; - ath6kl_disconnect(ar); - memset(ar->ssid, 0, sizeof(ar->ssid)); - ar->ssid_len = 0; + vif->reconnect_flag = 0; + ath6kl_disconnect(vif); + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = 0; if (!test_bit(SKIP_SCAN, &ar->flag)) - memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); up(&ar->sem); - ar->sme_state = SME_DISCONNECTED; + vif->sme_state = SME_DISCONNECTED; return 0; } -void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, +void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 
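The connect-event handler above is driven by a small per-vif SME state: a connect event received while the vif is in SME_CONNECTING completes the pending connect() and is reported with cfg80211_connect_result(), while the same event in SME_CONNECTED is treated as a roam and reported with cfg80211_roamed(). Reduced to its skeleton, with the reporting calls shown only as comments:

    enum sme_state { SME_DISCONNECTED, SME_CONNECTING, SME_CONNECTED };

    static void on_connect_event(enum sme_state *state)
    {
    	switch (*state) {
    	case SME_CONNECTING:
    		*state = SME_CONNECTED;
    		/* cfg80211_connect_result(ndev, bssid, ..., WLAN_STATUS_SUCCESS, ...) */
    		break;
    	case SME_CONNECTED:
    		/* already associated: cfg80211_roamed(ndev, chan, bssid, ...) */
    		break;
    	case SME_DISCONNECTED:
    	default:
    		break;
    	}
    }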
*assoc_info, u16 proto_reason) { - if (ar->scan_req) { - cfg80211_scan_done(ar->scan_req, true); - ar->scan_req = NULL; + struct ath6kl *ar = vif->ar; + + if (vif->scan_req) { + cfg80211_scan_done(vif->scan_req, true); + vif->scan_req = NULL; } - if (ar->nw_type & ADHOC_NETWORK) { - if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { + if (vif->nw_type & ADHOC_NETWORK) { + if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in ibss mode\n", __func__); return; } memset(bssid, 0, ETH_ALEN); - cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); + cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL); return; } - if (ar->nw_type & INFRA_NETWORK) { - if (ar->wdev->iftype != NL80211_IFTYPE_STATION && - ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { + if (vif->nw_type & INFRA_NETWORK) { + if (vif->wdev.iftype != NL80211_IFTYPE_STATION && + vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in station mode\n", __func__); return; @@ -614,42 +820,46 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, */ if (reason != DISCONNECT_CMD) { - ath6kl_wmi_disconnect_cmd(ar->wmi); + ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); return; } - clear_bit(CONNECT_PEND, &ar->flag); + clear_bit(CONNECT_PEND, &vif->flags); - if (ar->sme_state == SME_CONNECTING) { - cfg80211_connect_result(ar->net_dev, + if (vif->sme_state == SME_CONNECTING) { + cfg80211_connect_result(vif->ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); - } else if (ar->sme_state == SME_CONNECTED) { - cfg80211_disconnected(ar->net_dev, reason, + } else if (vif->sme_state == SME_CONNECTED) { + cfg80211_disconnected(vif->ndev, reason, NULL, 0, GFP_KERNEL); } - ar->sme_state = SME_DISCONNECTED; + vif->sme_state = SME_DISCONNECTED; } static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); s8 n_channels = 0; u16 *channels = NULL; int ret = 0; + u32 force_fg_scan = 0; - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; + ath6kl_cfg80211_sscan_disable(vif); + if (!ar->usr_bss_filter) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); ret = ath6kl_wmi_bssfilter_cmd( - ar->wmi, - (test_bit(CONNECTED, &ar->flag) ? + ar->wmi, vif->fw_vif_idx, + (test_bit(CONNECTED, &vif->flags) ? 
ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0); if (ret) { ath6kl_err("couldn't set bss filtering\n"); @@ -664,14 +874,19 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, request->n_ssids = MAX_PROBED_SSID_INDEX - 1; for (i = 0; i < request->n_ssids; i++) - ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, - SPECIFIC_SSID_FLAG, + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, + i + 1, SPECIFIC_SSID_FLAG, request->ssids[i].ssid_len, request->ssids[i].ssid); } + /* + * FIXME: we should clear the IE in fw if it's not set so just + * remove the check altogether + */ if (request->ie) { - ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ, + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_PROBE_REQ, request->ie, request->ie_len); if (ret) { ath6kl_err("failed to set Probe Request appie for " @@ -702,44 +917,63 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, channels[i] = request->channels[i]->center_freq; } - ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0, - false, 0, 0, n_channels, channels); + if (test_bit(CONNECTED, &vif->flags)) + force_fg_scan = 1; + + if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + ar->fw_capabilities)) { + /* + * If capable of doing P2P mgmt operations using + * station interface, send additional information like + * supported rates to advertise and xmit rates for + * probe requests + */ + ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx, + WMI_LONG_SCAN, force_fg_scan, + false, 0, 0, n_channels, + channels, request->no_cck, + request->rates); + } else { + ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx, + WMI_LONG_SCAN, force_fg_scan, + false, 0, 0, n_channels, + channels); + } if (ret) ath6kl_err("wmi_startscan_cmd failed\n"); else - ar->scan_req = request; + vif->scan_req = request; kfree(channels); return ret; } -void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status) +void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) { + struct ath6kl *ar = vif->ar; int i; - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__, + aborted ? 
" aborted" : ""); - if (!ar->scan_req) + if (!vif->scan_req) return; - if ((status == -ECANCELED) || (status == -EBUSY)) { - cfg80211_scan_done(ar->scan_req, true); + if (aborted) goto out; - } - cfg80211_scan_done(ar->scan_req, false); - - if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) { - for (i = 0; i < ar->scan_req->n_ssids; i++) { - ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, - DISABLE_SSID_FLAG, + if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) { + for (i = 0; i < vif->scan_req->n_ssids; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, + i + 1, DISABLE_SSID_FLAG, 0, NULL); } } out: - ar->scan_req = NULL; + cfg80211_scan_done(vif->scan_req, aborted); + vif->scan_req = NULL; } static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, @@ -747,15 +981,22 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac_addr, struct key_params *params) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); struct ath6kl_key *key = NULL; u8 key_usage; u8 key_type; - int status = 0; - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; + if (params->cipher == CCKM_KRK_CIPHER_SUITE) { + if (params->key_len != WMI_KRK_LEN) + return -EINVAL; + return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx, + params->key); + } + if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: key index %d out of bounds\n", __func__, @@ -763,7 +1004,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, return -ENOENT; } - key = &ar->keys[key_index]; + key = &vif->keys[key_index]; memset(key, 0, sizeof(struct ath6kl_key)); if (pairwise) @@ -772,13 +1013,19 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, key_usage = GROUP_USAGE; if (params) { + int seq_len = params->seq_len; + if (params->cipher == WLAN_CIPHER_SUITE_SMS4 && + seq_len > ATH6KL_KEY_SEQ_LEN) { + /* Only first half of the WPI PN is configured */ + seq_len = ATH6KL_KEY_SEQ_LEN; + } if (params->key_len > WLAN_MAX_KEY_LEN || - params->seq_len > sizeof(key->seq)) + seq_len > sizeof(key->seq)) return -EINVAL; key->key_len = params->key_len; memcpy(key->key, params->key, key->key_len); - key->seq_len = params->seq_len; + key->seq_len = seq_len; memcpy(key->seq, params->seq, key->seq_len); key->cipher = params->cipher; } @@ -796,31 +1043,33 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, case WLAN_CIPHER_SUITE_CCMP: key_type = AES_CRYPT; break; + case WLAN_CIPHER_SUITE_SMS4: + key_type = WAPI_CRYPT; + break; default: return -ENOTSUPP; } - if (((ar->auth_mode == WPA_PSK_AUTH) - || (ar->auth_mode == WPA2_PSK_AUTH)) + if (((vif->auth_mode == WPA_PSK_AUTH) + || (vif->auth_mode == WPA2_PSK_AUTH)) && (key_usage & GROUP_USAGE)) - del_timer(&ar->disconnect_timer); + del_timer(&vif->disconnect_timer); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", __func__, key_index, key->key_len, key_type, key_usage, key->seq_len); - ar->def_txkey_index = key_index; - - if (ar->nw_type == AP_NETWORK && !pairwise && - (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) { + if (vif->nw_type == AP_NETWORK && !pairwise && + (key_type == TKIP_CRYPT || key_type == AES_CRYPT || + key_type == WAPI_CRYPT) && params) { ar->ap_mode_bkey.valid = true; 
ar->ap_mode_bkey.key_index = key_index; ar->ap_mode_bkey.key_type = key_type; ar->ap_mode_bkey.key_len = key->key_len; memcpy(ar->ap_mode_bkey.key, key->key, key->key_len); - if (!test_bit(CONNECTED, &ar->flag)) { + if (!test_bit(CONNECTED, &vif->flags)) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group " "key configuration until AP mode has been " "started\n"); @@ -832,8 +1081,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, } } - if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT && - !test_bit(CONNECTED, &ar->flag)) { + if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT && + !test_bit(CONNECTED, &vif->flags)) { /* * Store the key locally so that it can be re-configured after * the AP mode has properly started @@ -841,31 +1090,29 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, */ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration " "until AP mode has been started\n"); - ar->wep_key_list[key_index].key_len = key->key_len; - memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len); + vif->wep_key_list[key_index].key_len = key->key_len; + memcpy(vif->wep_key_list[key_index].key, key->key, + key->key_len); return 0; } - status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, - key_type, key_usage, key->key_len, - key->seq, key->key, KEY_OP_INIT_VAL, - (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); - - if (status) - return -EIO; - - return 0; + return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index, + key_type, key_usage, key->key_len, + key->seq, key->seq_len, key->key, + KEY_OP_INIT_VAL, + (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); } static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { @@ -875,15 +1122,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, return -ENOENT; } - if (!ar->keys[key_index].key_len) { + if (!vif->keys[key_index].key_len) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d is empty\n", __func__, key_index); return 0; } - ar->keys[key_index].key_len = 0; + vif->keys[key_index].key_len = 0; - return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index); + return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index); } static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, @@ -892,13 +1139,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, void (*callback) (void *cookie, struct key_params *)) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); struct ath6kl_key *key = NULL; struct key_params params; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { @@ -908,7 +1155,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, return -ENOENT; } - key = &ar->keys[key_index]; + key = &vif->keys[key_index]; memset(¶ms, 0, sizeof(params)); params.cipher = key->cipher; params.key_len = 
key->key_len; @@ -926,15 +1173,15 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy, u8 key_index, bool unicast, bool multicast) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl *ar = ath6kl_priv(ndev); + struct ath6kl_vif *vif = netdev_priv(ndev); struct ath6kl_key *key = NULL; - int status = 0; u8 key_usage; enum crypto_type key_type = NONE_CRYPT; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { @@ -944,43 +1191,41 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy, return -ENOENT; } - if (!ar->keys[key_index].key_len) { + if (!vif->keys[key_index].key_len) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n", __func__, key_index); return -EINVAL; } - ar->def_txkey_index = key_index; - key = &ar->keys[ar->def_txkey_index]; + vif->def_txkey_index = key_index; + key = &vif->keys[vif->def_txkey_index]; key_usage = GROUP_USAGE; - if (ar->prwise_crypto == WEP_CRYPT) + if (vif->prwise_crypto == WEP_CRYPT) key_usage |= TX_USAGE; if (unicast) - key_type = ar->prwise_crypto; + key_type = vif->prwise_crypto; if (multicast) - key_type = ar->grp_crypto; + key_type = vif->grp_crypto; - if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag)) + if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags)) return 0; /* Delay until AP mode has been started */ - status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, - key_type, key_usage, - key->key_len, key->seq, key->key, - KEY_OP_INIT_VAL, NULL, - SYNC_BOTH_WMIFLAG); - if (status) - return -EIO; - - return 0; + return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, + vif->def_txkey_index, + key_type, key_usage, + key->key_len, key->seq, key->seq_len, + key->key, + KEY_OP_INIT_VAL, NULL, + SYNC_BOTH_WMIFLAG); } -void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast); - cfg80211_michael_mic_failure(ar->net_dev, ar->bssid, + cfg80211_michael_mic_failure(vif->ndev, vif->bssid, (ismcast ? 
NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE), keyid, NULL, GFP_KERNEL); @@ -989,12 +1234,17 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + struct ath6kl_vif *vif; int ret; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__, changed); - if (!ath6kl_cfg80211_ready(ar)) + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (changed & WIPHY_PARAM_RTS_THRESHOLD) { @@ -1014,15 +1264,21 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) */ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, enum nl80211_tx_power_setting type, - int dbm) + int mbm) { struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + struct ath6kl_vif *vif; u8 ath6kl_dbm; + int dbm = MBM_TO_DBM(mbm); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__, type, dbm); - if (!ath6kl_cfg80211_ready(ar)) + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) return -EIO; switch (type) { @@ -1037,7 +1293,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, return -EOPNOTSUPP; } - ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm); + ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm); return 0; } @@ -1045,14 +1301,19 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm) { struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + struct ath6kl_vif *vif; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; - if (test_bit(CONNECTED, &ar->flag)) { + if (test_bit(CONNECTED, &vif->flags)) { ar->tx_pwr = 0; - if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) { + if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) { ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n"); return -EIO; } @@ -1076,11 +1337,12 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, { struct ath6kl *ar = ath6kl_priv(dev); struct wmi_power_mode_cmd mode; + struct ath6kl_vif *vif = netdev_priv(dev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n", __func__, pmgmt, timeout); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; if (pmgmt) { @@ -1091,7 +1353,8 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, mode.pwr_mode = MAX_PERF_POWER; } - if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) { + if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx, + mode.pwr_mode) != 0) { ath6kl_err("wmi_powermode_cmd failed\n"); return -EIO; } @@ -1099,41 +1362,83 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, return 0; } +static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy, + char *name, + enum nl80211_iftype type, + u32 *flags, + struct vif_params *params) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + struct net_device *ndev; + u8 if_idx, nw_type; + + if (ar->num_vif == ar->vif_max) { + ath6kl_err("Reached maximum number of supported vif\n"); + return ERR_PTR(-EINVAL); + } + + if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) { + ath6kl_err("Not a supported interface type\n"); + return ERR_PTR(-EINVAL); + } + + ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type); + if (!ndev) + return ERR_PTR(-ENOMEM); + + ar->num_vif++; + + return ndev; +} 
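The set_txpower change above follows cfg80211's switch to milli-dBm units: the callback now receives mBm and converts with MBM_TO_DBM(), which is a divide by 100, so a request of 2000 mBm programs 20 dBm. The same arithmetic in isolation:

    /* cfg80211 passes tx power in mBm; 1 dBm == 100 mBm. */
    static int mbm_to_dbm(int mbm)
    {
    	return mbm / 100;	/* 2000 mBm -> 20 dBm */
    }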
+ +static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy, + struct net_device *ndev) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + struct ath6kl_vif *vif = netdev_priv(ndev); + + spin_lock_bh(&ar->list_lock); + list_del(&vif->list); + spin_unlock_bh(&ar->list_lock); + + ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag)); + + ath6kl_deinit_if_data(vif); + + return 0; +} + static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { - struct ath6kl *ar = ath6kl_priv(ndev); - struct wireless_dev *wdev = ar->wdev; + struct ath6kl_vif *vif = netdev_priv(ndev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); - if (!ath6kl_cfg80211_ready(ar)) - return -EIO; - switch (type) { case NL80211_IFTYPE_STATION: - ar->next_mode = INFRA_NETWORK; + vif->next_mode = INFRA_NETWORK; break; case NL80211_IFTYPE_ADHOC: - ar->next_mode = ADHOC_NETWORK; + vif->next_mode = ADHOC_NETWORK; break; case NL80211_IFTYPE_AP: - ar->next_mode = AP_NETWORK; + vif->next_mode = AP_NETWORK; break; case NL80211_IFTYPE_P2P_CLIENT: - ar->next_mode = INFRA_NETWORK; + vif->next_mode = INFRA_NETWORK; break; case NL80211_IFTYPE_P2P_GO: - ar->next_mode = AP_NETWORK; + vif->next_mode = AP_NETWORK; break; default: ath6kl_err("invalid interface type %u\n", type); return -EOPNOTSUPP; } - wdev->iftype = type; + vif->wdev.iftype = type; return 0; } @@ -1143,16 +1448,17 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, struct cfg80211_ibss_params *ibss_param) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); int status; - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; - ar->ssid_len = ibss_param->ssid_len; - memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len); + vif->ssid_len = ibss_param->ssid_len; + memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len); if (ibss_param->channel) - ar->ch_hint = ibss_param->channel->center_freq; + vif->ch_hint = ibss_param->channel->center_freq; if (ibss_param->channel_fixed) { /* @@ -1164,44 +1470,45 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, return -EOPNOTSUPP; } - memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid)) - memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid)); + memcpy(vif->req_bssid, ibss_param->bssid, + sizeof(vif->req_bssid)); - ath6kl_set_wpa_version(ar, 0); + ath6kl_set_wpa_version(vif, 0); - status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM); + status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM); if (status) return status; if (ibss_param->privacy) { - ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true); - ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false); + ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true); + ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false); } else { - ath6kl_set_cipher(ar, 0, true); - ath6kl_set_cipher(ar, 0, false); + ath6kl_set_cipher(vif, 0, true); + ath6kl_set_cipher(vif, 0, false); } - ar->nw_type = ar->next_mode; + vif->nw_type = vif->next_mode; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: connect called with authmode %d dot11 auth %d" " PW crypto %d PW crypto len %d GRP crypto %d" " GRP crypto len %d channel hint %u\n", __func__, - ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, - ar->prwise_crypto_len, ar->grp_crypto, - ar->grp_crypto_len, ar->ch_hint); - - status = 
ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, - ar->dot11_auth_mode, ar->auth_mode, - ar->prwise_crypto, - ar->prwise_crypto_len, - ar->grp_crypto, ar->grp_crypto_len, - ar->ssid_len, ar->ssid, - ar->req_bssid, ar->ch_hint, - ar->connect_ctrl_flags); - set_bit(CONNECT_PEND, &ar->flag); + vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto, + vif->prwise_crypto_len, vif->grp_crypto, + vif->grp_crypto_len, vif->ch_hint); + + status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, + vif->dot11_auth_mode, vif->auth_mode, + vif->prwise_crypto, + vif->prwise_crypto_len, + vif->grp_crypto, vif->grp_crypto_len, + vif->ssid_len, vif->ssid, + vif->req_bssid, vif->ch_hint, + ar->connect_ctrl_flags, SUBTYPE_NONE); + set_bit(CONNECT_PEND, &vif->flags); return 0; } @@ -1209,14 +1516,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { - struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; - ath6kl_disconnect(ar); - memset(ar->ssid, 0, sizeof(ar->ssid)); - ar->ssid_len = 0; + ath6kl_disconnect(vif); + memset(vif->ssid, 0, sizeof(vif->ssid)); + vif->ssid_len = 0; return 0; } @@ -1226,6 +1533,8 @@ static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, + CCKM_KRK_CIPHER_SUITE, + WLAN_CIPHER_SUITE_SMS4, }; static bool is_rate_legacy(s32 rate) @@ -1293,21 +1602,22 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); long left; bool sgi; s32 rate; int ret; u8 mcs; - if (memcmp(mac, ar->bssid, ETH_ALEN) != 0) + if (memcmp(mac, vif->bssid, ETH_ALEN) != 0) return -ENOENT; if (down_interruptible(&ar->sem)) return -EBUSY; - set_bit(STATS_UPDATE_PEND, &ar->flag); + set_bit(STATS_UPDATE_PEND, &vif->flags); - ret = ath6kl_wmi_get_stats_cmd(ar->wmi); + ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx); if (ret != 0) { up(&ar->sem); @@ -1316,7 +1626,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, left = wait_event_interruptible_timeout(ar->event_wq, !test_bit(STATS_UPDATE_PEND, - &ar->flag), + &vif->flags), WMI_TIMEOUT); up(&ar->sem); @@ -1326,24 +1636,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, else if (left < 0) return left; - if (ar->target_stats.rx_byte) { - sinfo->rx_bytes = ar->target_stats.rx_byte; + if (vif->target_stats.rx_byte) { + sinfo->rx_bytes = vif->target_stats.rx_byte; sinfo->filled |= STATION_INFO_RX_BYTES; - sinfo->rx_packets = ar->target_stats.rx_pkt; + sinfo->rx_packets = vif->target_stats.rx_pkt; sinfo->filled |= STATION_INFO_RX_PACKETS; } - if (ar->target_stats.tx_byte) { - sinfo->tx_bytes = ar->target_stats.tx_byte; + if (vif->target_stats.tx_byte) { + sinfo->tx_bytes = vif->target_stats.tx_byte; sinfo->filled |= STATION_INFO_TX_BYTES; - sinfo->tx_packets = ar->target_stats.tx_pkt; + sinfo->tx_packets = vif->target_stats.tx_pkt; sinfo->filled |= STATION_INFO_TX_PACKETS; } - sinfo->signal = ar->target_stats.cs_rssi; + sinfo->signal = vif->target_stats.cs_rssi; sinfo->filled |= STATION_INFO_SIGNAL; - rate = ar->target_stats.tx_ucast_rate; + rate = vif->target_stats.tx_ucast_rate; if (is_rate_legacy(rate)) { sinfo->txrate.legacy = rate / 100; @@ -1375,13 +1685,13 @@ static int 
ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, sinfo->filled |= STATION_INFO_TX_BITRATE; - if (test_bit(CONNECTED, &ar->flag) && - test_bit(DTIM_PERIOD_AVAIL, &ar->flag) && - ar->nw_type == INFRA_NETWORK) { + if (test_bit(CONNECTED, &vif->flags) && + test_bit(DTIM_PERIOD_AVAIL, &vif->flags) && + vif->nw_type == INFRA_NETWORK) { sinfo->filled |= STATION_INFO_BSS_PARAM; sinfo->bss_param.flags = 0; - sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period; - sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int; + sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period; + sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int; } return 0; @@ -1391,7 +1701,9 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { struct ath6kl *ar = ath6kl_priv(netdev); - return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, + struct ath6kl_vif *vif = netdev_priv(netdev); + + return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, pmksa->pmkid, true); } @@ -1399,25 +1711,302 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { struct ath6kl *ar = ath6kl_priv(netdev); - return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, + struct ath6kl_vif *vif = netdev_priv(netdev); + + return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, pmksa->pmkid, false); } static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) { struct ath6kl *ar = ath6kl_priv(netdev); - if (test_bit(CONNECTED, &ar->flag)) - return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false); + struct ath6kl_vif *vif = netdev_priv(netdev); + + if (test_bit(CONNECTED, &vif->flags)) + return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, + vif->bssid, NULL, false); + return 0; +} + +static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) +{ + struct ath6kl_vif *vif; + int ret, pos, left; + u32 filter = 0; + u16 i; + u8 mask[WOW_MASK_SIZE]; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + if (!test_bit(CONNECTED, &vif->flags)) + return -EINVAL; + + /* Clear existing WOW patterns */ + for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++) + ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, + WOW_LIST_ID, i); + /* Configure new WOW patterns */ + for (i = 0; i < wow->n_patterns; i++) { + + /* + * Convert given nl80211 specific mask value to equivalent + * driver specific mask value and send it to the chip along + * with patterns. For example, If the mask value defined in + * struct cfg80211_wowlan is 0xA (equivalent binary is 1010), + * then equivalent driver specific mask value is + * "0xFF 0x00 0xFF 0x00". + */ + memset(&mask, 0, sizeof(mask)); + for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) { + if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8))) + mask[pos] = 0xFF; + } + /* + * Note: Pattern's offset is not passed as part of wowlan + * parameter from CFG layer. So it's always passed as ZERO + * to the firmware. It means, given WOW patterns are always + * matched from the first byte of received pkt in the firmware. 
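The in-line comment above gives the WOW mask conversion by example: cfg80211 packs the per-byte match mask as a bitmap, while the firmware wants one full byte per pattern byte (0xFF = compare, 0x00 = ignore). A standalone version of the same loop, assuming the bitmap is read least-significant bit first as in the code, so a 4-byte pattern with mask 0x0A expands to 00 FF 00 FF:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char nl_mask[] = { 0x0A };	/* binary 1010 */
    	unsigned char fw_mask[4] = { 0 };
    	int pos;

    	/* bit (pos % 8) of byte (pos / 8) selects pattern byte pos */
    	for (pos = 0; pos < 4; pos++)
    		if (nl_mask[pos / 8] & (0x1 << (pos % 8)))
    			fw_mask[pos] = 0xFF;

    	for (pos = 0; pos < 4; pos++)
    		printf("%02X ", fw_mask[pos]);	/* prints: 00 FF 00 FF */
    	printf("\n");
    	return 0;
    }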
+ */ + ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, + vif->fw_vif_idx, WOW_LIST_ID, + wow->patterns[i].pattern_len, + 0 /* pattern offset */, + wow->patterns[i].pattern, mask); + if (ret) + return ret; + } + + if (wow->disconnect) + filter |= WOW_FILTER_OPTION_NWK_DISASSOC; + + if (wow->magic_pkt) + filter |= WOW_FILTER_OPTION_MAGIC_PACKET; + + if (wow->gtk_rekey_failure) + filter |= WOW_FILTER_OPTION_GTK_ERROR; + + if (wow->eap_identity_req) + filter |= WOW_FILTER_OPTION_EAP_REQ; + + if (wow->four_way_handshake) + filter |= WOW_FILTER_OPTION_8021X_4WAYHS; + + ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_WOW_MODE_ENABLE, + filter, + WOW_HOST_REQ_DELAY); + if (ret) + return ret; + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_ASLEEP); + if (ret) + return ret; + + if (ar->tx_pending[ar->ctrl_ep]) { + left = wait_event_interruptible_timeout(ar->event_wq, + ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT); + if (left == 0) { + ath6kl_warn("clear wmi ctrl data timeout\n"); + ret = -ETIMEDOUT; + } else if (left < 0) { + ath6kl_warn("clear wmi ctrl data failed: %d\n", left); + ret = left; + } + } + + return ret; +} + +static int ath6kl_wow_resume(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + int ret; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_AWAKE); + return ret; +} + +int ath6kl_cfg80211_suspend(struct ath6kl *ar, + enum ath6kl_cfg_suspend_mode mode, + struct cfg80211_wowlan *wow) +{ + int ret; + + switch (mode) { + case ATH6KL_CFG_SUSPEND_WOW: + + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n"); + + /* Flush all non control pkts in TX path */ + ath6kl_tx_data_cleanup(ar); + + ret = ath6kl_wow_suspend(ar, wow); + if (ret) { + ath6kl_err("wow suspend failed: %d\n", ret); + return ret; + } + ar->state = ATH6KL_STATE_WOW; + break; + + case ATH6KL_CFG_SUSPEND_DEEPSLEEP: + + ath6kl_cfg80211_stop_all(ar); + + /* save the current power mode before enabling power save */ + ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + + ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); + if (ret) { + ath6kl_warn("wmi powermode command failed during suspend: %d\n", + ret); + } + + ar->state = ATH6KL_STATE_DEEPSLEEP; + + break; + + case ATH6KL_CFG_SUSPEND_CUTPOWER: + + ath6kl_cfg80211_stop_all(ar); + + if (ar->state == ATH6KL_STATE_OFF) { + ath6kl_dbg(ATH6KL_DBG_SUSPEND, + "suspend hw off, no action for cutpower\n"); + break; + } + + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n"); + + ret = ath6kl_init_hw_stop(ar); + if (ret) { + ath6kl_warn("failed to stop hw during suspend: %d\n", + ret); + } + + ar->state = ATH6KL_STATE_CUTPOWER; + + break; + + case ATH6KL_CFG_SUSPEND_SCHED_SCAN: + /* + * Nothing needed for schedule scan, firmware is already in + * wow mode and sleeping most of the time. 
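The filter flags assembled above are a one-to-one translation of the cfg80211 wowlan triggers into the firmware's WOW filter options, and the same trigger set is advertised later through wiphy->wowlan.flags. Summarised as a lookup table, strings only and purely for illustration:

    static const struct {
    	const char *cfg80211_trigger;	/* field in struct cfg80211_wowlan */
    	const char *fw_filter;		/* flag passed in the WOW mode command */
    } wow_trigger_map[] = {
    	{ "disconnect",         "WOW_FILTER_OPTION_NWK_DISASSOC" },
    	{ "magic_pkt",          "WOW_FILTER_OPTION_MAGIC_PACKET" },
    	{ "gtk_rekey_failure",  "WOW_FILTER_OPTION_GTK_ERROR" },
    	{ "eap_identity_req",   "WOW_FILTER_OPTION_EAP_REQ" },
    	{ "four_way_handshake", "WOW_FILTER_OPTION_8021X_4WAYHS" },
    };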
+ */ + break; + + default: + break; + } + + return 0; +} + +int ath6kl_cfg80211_resume(struct ath6kl *ar) +{ + int ret; + + switch (ar->state) { + case ATH6KL_STATE_WOW: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n"); + + ret = ath6kl_wow_resume(ar); + if (ret) { + ath6kl_warn("wow mode resume failed: %d\n", ret); + return ret; + } + + ar->state = ATH6KL_STATE_ON; + break; + + case ATH6KL_STATE_DEEPSLEEP: + if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { + ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, + ar->wmi->saved_pwr_mode); + if (ret) { + ath6kl_warn("wmi powermode command failed during resume: %d\n", + ret); + } + } + + ar->state = ATH6KL_STATE_ON; + + break; + + case ATH6KL_STATE_CUTPOWER: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n"); + + ret = ath6kl_init_hw_start(ar); + if (ret) { + ath6kl_warn("Failed to boot hw in resume: %d\n", ret); + return ret; + } + break; + + case ATH6KL_STATE_SCHED_SCAN: + break; + + default: + break; + } + return 0; } #ifdef CONFIG_PM -static int ar6k_cfg80211_suspend(struct wiphy *wiphy, + +/* hif layer decides what suspend mode to use */ +static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) { struct ath6kl *ar = wiphy_priv(wiphy); - return ath6kl_hif_suspend(ar); + return ath6kl_hif_suspend(ar, wow); +} + +static int __ath6kl_cfg80211_resume(struct wiphy *wiphy) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + + return ath6kl_hif_resume(ar); +} + +/* + * FIXME: WOW suspend mode is selected if the host sdio controller supports + * both sdio irq wake up and keep power. The target pulls sdio data line to + * wake up the host when WOW pattern matches. This causes sdio irq handler + * is being called in the host side which internally hits ath6kl's RX path. + * + * Since sdio interrupt is not disabled, RX path executes even before + * the host executes the actual resume operation from PM module. + * + * In the current scenario, WOW resume should happen before start processing + * any data from the target. So It's required to perform WOW resume in RX path. + * Ideally we should perform WOW resume only in the actual platform + * resume path. This area needs bit rework to avoid WOW resume in RX path. + * + * ath6kl_check_wow_status() is called from ath6kl_rx(). 
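Taken together, ath6kl_cfg80211_suspend() and ath6kl_cfg80211_resume() above form a small state machine on ar->state: WOW and DEEPSLEEP are entered from the running state and return to it on resume, CUTPOWER stops the hardware entirely and reboots it in resume, and SCHED_SCAN leaves the firmware scanning on its own while the host sleeps. A rough summary of the states touched here; the real enum lives elsewhere in the driver and these comments only restate what the hunks above do:

    /* Illustrative only; loosely mirrors the ATH6KL_STATE_* values used above. */
    enum {
    	STATE_OFF,		/* hardware not started */
    	STATE_ON,		/* normal operation */
    	STATE_WOW,		/* patterns armed, host put to sleep */
    	STATE_DEEPSLEEP,	/* fw dropped to REC_POWER, restored on resume */
    	STATE_CUTPOWER,		/* hw stopped; ath6kl_init_hw_start() on resume */
    	STATE_SCHED_SCAN,	/* fw keeps scanning while the host sleeps */
    };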
+ */ +void ath6kl_check_wow_status(struct ath6kl *ar) +{ + if (ar->state == ATH6KL_STATE_WOW) + ath6kl_cfg80211_resume(ar); +} + +#else + +void ath6kl_check_wow_status(struct ath6kl *ar) +{ } #endif @@ -1425,14 +2014,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) { - struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", __func__, chan->center_freq, chan->hw_value); - ar->next_chan = chan->center_freq; + vif->next_chan = chan->center_freq; return 0; } @@ -1444,9 +2033,10 @@ static bool ath6kl_is_p2p_ie(const u8 *pos) pos[4] == 0x9a && pos[5] == 0x09; } -static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies, - size_t ies_len) +static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif, + const u8 *ies, size_t ies_len) { + struct ath6kl *ar = vif->ar; const u8 *pos; u8 *buf = NULL; size_t len = 0; @@ -1473,8 +2063,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies, } } - ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP, - buf, len); + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_PROBE_RESP, buf, len); kfree(buf); return ret; } @@ -1483,36 +2073,39 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, struct beacon_parameters *info, bool add) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); struct ieee80211_mgmt *mgmt; u8 *ies; int ies_len; struct wmi_connect_cmd p; int res; - int i; + int i, ret; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add); - if (!ath6kl_cfg80211_ready(ar)) + if (!ath6kl_cfg80211_ready(vif)) return -EIO; - if (ar->next_mode != AP_NETWORK) + if (vif->next_mode != AP_NETWORK) return -EOPNOTSUPP; if (info->beacon_ies) { - res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON, + res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_BEACON, info->beacon_ies, info->beacon_ies_len); if (res) return res; } if (info->proberesp_ies) { - res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies, + res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies, info->proberesp_ies_len); if (res) return res; } if (info->assocresp_ies) { - res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP, + res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_ASSOC_RESP, info->assocresp_ies, info->assocresp_ies_len); if (res) @@ -1539,12 +2132,14 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, if (info->ssid == NULL) return -EINVAL; - memcpy(ar->ssid, info->ssid, info->ssid_len); - ar->ssid_len = info->ssid_len; + memcpy(vif->ssid, info->ssid, info->ssid_len); + vif->ssid_len = info->ssid_len; if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) return -EOPNOTSUPP; /* TODO */ - ar->dot11_auth_mode = OPEN_AUTH; + ret = ath6kl_set_auth_type(vif, info->auth_type); + if (ret) + return ret; memset(&p, 0, sizeof(p)); @@ -1566,7 +2161,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, } if (p.auth_mode == 0) p.auth_mode = NONE_AUTH; - ar->auth_mode = p.auth_mode; + vif->auth_mode = p.auth_mode; for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) { switch (info->crypto.ciphers_pairwise[i]) { @@ -1580,13 +2175,16 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct 
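ath6kl_is_p2p_ie(), defined just above the probe-response IE handling, matches a vendor-specific element carrying the Wi-Fi Alliance P2P identifier; the bytes visible in the hunk (0x9a, 0x09) are the tail of the OUI 50:6f:9a plus the P2P OUI type. Spelled out as a standalone check, with the constants written numerically for clarity:

    #include <stdbool.h>

    /* Vendor specific element (tag 221) with WFA OUI 50:6f:9a, type 0x09 (P2P). */
    static bool is_p2p_vendor_ie(const unsigned char *pos)
    {
    	return pos[0] == 221 && pos[1] >= 4 &&
    	       pos[2] == 0x50 && pos[3] == 0x6f &&
    	       pos[4] == 0x9a && pos[5] == 0x09;
    }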
net_device *dev, case WLAN_CIPHER_SUITE_CCMP: p.prwise_crypto_type |= AES_CRYPT; break; + case WLAN_CIPHER_SUITE_SMS4: + p.prwise_crypto_type |= WAPI_CRYPT; + break; } } if (p.prwise_crypto_type == 0) { p.prwise_crypto_type = NONE_CRYPT; - ath6kl_set_cipher(ar, 0, true); + ath6kl_set_cipher(vif, 0, true); } else if (info->crypto.n_ciphers_pairwise == 1) - ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true); + ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true); switch (info->crypto.cipher_group) { case WLAN_CIPHER_SUITE_WEP40: @@ -1599,21 +2197,34 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_CCMP: p.grp_crypto_type = AES_CRYPT; break; + case WLAN_CIPHER_SUITE_SMS4: + p.grp_crypto_type = WAPI_CRYPT; + break; default: p.grp_crypto_type = NONE_CRYPT; break; } - ath6kl_set_cipher(ar, info->crypto.cipher_group, false); + ath6kl_set_cipher(vif, info->crypto.cipher_group, false); p.nw_type = AP_NETWORK; - ar->nw_type = ar->next_mode; + vif->nw_type = vif->next_mode; - p.ssid_len = ar->ssid_len; - memcpy(p.ssid, ar->ssid, ar->ssid_len); - p.dot11_auth_mode = ar->dot11_auth_mode; - p.ch = cpu_to_le16(ar->next_chan); + p.ssid_len = vif->ssid_len; + memcpy(p.ssid, vif->ssid, vif->ssid_len); + p.dot11_auth_mode = vif->dot11_auth_mode; + p.ch = cpu_to_le16(vif->next_chan); - res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p); + if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) { + p.nw_subtype = SUBTYPE_P2PGO; + } else { + /* + * Due to firmware limitation, it is not possible to + * do P2P mgmt operations in AP mode + */ + p.nw_subtype = SUBTYPE_NONE; + } + + res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); if (res < 0) return res; @@ -1635,14 +2246,15 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev, static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - if (ar->nw_type != AP_NETWORK) + if (vif->nw_type != AP_NETWORK) return -EOPNOTSUPP; - if (!test_bit(CONNECTED, &ar->flag)) + if (!test_bit(CONNECTED, &vif->flags)) return -ENOTCONN; - ath6kl_wmi_disconnect_cmd(ar->wmi); - clear_bit(CONNECTED, &ar->flag); + ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); + clear_bit(CONNECTED, &vif->flags); return 0; } @@ -1651,8 +2263,9 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_parameters *params) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - if (ar->nw_type != AP_NETWORK) + if (vif->nw_type != AP_NETWORK) return -EOPNOTSUPP; /* Use this only for authorizing/unauthorizing a station */ @@ -1660,10 +2273,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, return -EOPNOTSUPP; if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) - return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE, - mac, 0); - return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac, - 0); + return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, + WMI_AP_MLME_AUTHORIZE, mac, 0); + return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, + WMI_AP_MLME_UNAUTHORIZE, mac, 0); } static int ath6kl_remain_on_channel(struct wiphy *wiphy, @@ -1674,13 +2287,20 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy, u64 *cookie) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + u32 id; /* TODO: if already pending or ongoing 
remain-on-channel, * return -EBUSY */ - *cookie = 1; /* only a single pending request is supported */ + id = ++vif->last_roc_id; + if (id == 0) { + /* Do not use 0 as the cookie value */ + id = ++vif->last_roc_id; + } + *cookie = id; - return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq, - duration); + return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx, + chan->center_freq, duration); } static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, @@ -1688,16 +2308,20 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, u64 cookie) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - if (cookie != 1) + if (cookie != vif->last_roc_id) return -ENOENT; + vif->last_cancel_roc_id = cookie; - return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi); + return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx); } -static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf, - size_t len, unsigned int freq) +static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif, + const u8 *buf, size_t len, + unsigned int freq) { + struct ath6kl *ar = vif->ar; const u8 *pos; u8 *p2p; int p2p_len; @@ -1724,8 +2348,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf, pos += 2 + pos[1]; } - ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da, - p2p, p2p_len); + ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq, + mgmt->da, p2p, p2p_len); kfree(p2p); return ret; } @@ -1734,44 +2358,61 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, bool offchan, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, - const u8 *buf, size_t len, bool no_cck, u64 *cookie) + const u8 *buf, size_t len, bool no_cck, + bool dont_wait_for_ack, u64 *cookie) { struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); u32 id; const struct ieee80211_mgmt *mgmt; mgmt = (const struct ieee80211_mgmt *) buf; if (buf + len >= mgmt->u.probe_resp.variable && - ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) && + vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && ieee80211_is_probe_resp(mgmt->frame_control)) { /* * Send Probe Response frame in AP mode using a separate WMI * command to allow the target to fill in the generic IEs. */ *cookie = 0; /* TX status not supported */ - return ath6kl_send_go_probe_resp(ar, buf, len, + return ath6kl_send_go_probe_resp(vif, buf, len, chan->center_freq); } - id = ar->send_action_id++; + id = vif->send_action_id++; if (id == 0) { /* * 0 is a reserved value in the WMI command and shall not be * used for the command. 
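Both the remain-on-channel cookie above and the management-TX id a little further down are drawn from a per-vif counter that skips zero, since 0 is a reserved value in the corresponding WMI command. The pattern in isolation:

    /* Monotonic non-zero id; if the counter wraps to 0, bump it once more. */
    static unsigned int next_nonzero_id(unsigned int *counter)
    {
    	unsigned int id = ++(*counter);

    	if (id == 0)
    		id = ++(*counter);
    	return id;
    }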
*/ - id = ar->send_action_id++; + id = vif->send_action_id++; } *cookie = id; - return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait, - buf, len); + + if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + ar->fw_capabilities)) { + /* + * If capable of doing P2P mgmt operations using + * station interface, send additional information like + * supported rates to advertise and xmit rates for + * probe requests + */ + return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, + chan->center_freq, wait, + buf, len, no_cck); + } else { + return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id, + chan->center_freq, wait, + buf, len); + } } static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev, u16 frame_type, bool reg) { - struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n", __func__, frame_type, reg); @@ -1781,8 +2422,92 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we * hardcode target to report Probe Request frames all the time. */ - ar->probe_req_report = reg; + vif->probe_req_report = reg; + } +} + +static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); + u16 interval; + int ret; + u8 i; + + if (ar->state != ATH6KL_STATE_ON) + return -EIO; + + if (vif->sme_state != SME_DISCONNECTED) + return -EBUSY; + + for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, + i, DISABLE_SSID_FLAG, + 0, NULL); } + + /* fw uses seconds, also make sure that it's >0 */ + interval = max_t(u16, 1, request->interval / 1000); + + ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, + interval, interval, + 10, 0, 0, 0, 3, 0, 0, 0); + + if (request->n_ssids && request->ssids[0].ssid_len) { + for (i = 0; i < request->n_ssids; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, + i, SPECIFIC_SSID_FLAG, + request->ssids[i].ssid_len, + request->ssids[i].ssid); + } + } + + ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_WOW_MODE_ENABLE, + WOW_FILTER_SSID, + WOW_HOST_REQ_DELAY); + if (ret) { + ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret); + return ret; + } + + /* this also clears IE in fw if it's not set */ + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, + WMI_FRAME_PROBE_REQ, + request->ie, request->ie_len); + if (ret) { + ath6kl_warn("Failed to set probe request IE for scheduled scan: %d", + ret); + return ret; + } + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_ASLEEP); + if (ret) { + ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n", + ret); + return ret; + } + + ar->state = ATH6KL_STATE_SCHED_SCAN; + + return ret; +} + +static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy, + struct net_device *dev) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + bool stopped; + + stopped = __ath6kl_cfg80211_sscan_stop(vif); + + if (!stopped) + return -EIO; + + return 0; } static const struct ieee80211_txrx_stypes @@ -1808,6 +2533,8 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { }; static struct cfg80211_ops ath6kl_cfg80211_ops = { + .add_virtual_intf = ath6kl_cfg80211_add_iface, + .del_virtual_intf = ath6kl_cfg80211_del_iface, .change_virtual_intf = 
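For scheduled scans the interval supplied by cfg80211 is in milliseconds, while the firmware scan parameters take seconds, so the code above divides by 1000 and clamps to a minimum of one second. As plain arithmetic:

    /* e.g. 10000 ms -> 10 s; anything under 1000 ms still yields 1 s. */
    static unsigned short sscan_interval_secs(unsigned int interval_ms)
    {
    	unsigned int secs = interval_ms / 1000;

    	return (unsigned short)(secs ? secs : 1);
    }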
ath6kl_cfg80211_change_iface, .scan = ath6kl_cfg80211_scan, .connect = ath6kl_cfg80211_connect, @@ -1828,7 +2555,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = { .flush_pmksa = ath6kl_flush_pmksa, CFG80211_TESTMODE_CMD(ath6kl_tm_cmd) #ifdef CONFIG_PM - .suspend = ar6k_cfg80211_suspend, + .suspend = __ath6kl_cfg80211_suspend, + .resume = __ath6kl_cfg80211_resume, #endif .set_channel = ath6kl_set_channel, .add_beacon = ath6kl_add_beacon, @@ -1839,78 +2567,277 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = { .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel, .mgmt_tx = ath6kl_mgmt_tx, .mgmt_frame_register = ath6kl_mgmt_frame_register, + .sched_scan_start = ath6kl_cfg80211_sscan_start, + .sched_scan_stop = ath6kl_cfg80211_sscan_stop, }; -struct wireless_dev *ath6kl_cfg80211_init(struct device *dev) +void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) { - int ret = 0; - struct wireless_dev *wdev; - struct ath6kl *ar; + ath6kl_cfg80211_sscan_disable(vif); - wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); - if (!wdev) { - ath6kl_err("couldn't allocate wireless device\n"); - return NULL; + switch (vif->sme_state) { + case SME_DISCONNECTED: + break; + case SME_CONNECTING: + cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + break; + case SME_CONNECTED: + cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL); + break; + } + + if (test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags)) + ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); + + vif->sme_state = SME_DISCONNECTED; + clear_bit(CONNECTED, &vif->flags); + clear_bit(CONNECT_PEND, &vif->flags); + + /* disable scanning */ + if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF, + 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0) + ath6kl_warn("failed to disable scan during stop\n"); + + ath6kl_cfg80211_scan_complete_event(vif, true); +} + +void ath6kl_cfg80211_stop_all(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + + vif = ath6kl_vif_first(ar); + if (!vif) { + /* save the current power mode before enabling power save */ + ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + + if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0) + ath6kl_warn("ath6kl_deep_sleep_enable: " + "wmi_powermode_cmd failed\n"); + return; } + /* + * FIXME: we should take ar->list_lock to protect changes in the + * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop() + * sleeps. 
+ */ + list_for_each_entry(vif, &ar->vif_list, list) + ath6kl_cfg80211_stop(vif); +} + +struct ath6kl *ath6kl_core_alloc(struct device *dev) +{ + struct ath6kl *ar; + struct wiphy *wiphy; + u8 ctr; + /* create a new wiphy for use with cfg80211 */ - wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); - if (!wdev->wiphy) { + wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); + + if (!wiphy) { ath6kl_err("couldn't allocate wiphy device\n"); - kfree(wdev); return NULL; } - ar = wiphy_priv(wdev->wiphy); + ar = wiphy_priv(wiphy); ar->p2p = !!ath6kl_p2p; + ar->wiphy = wiphy; + ar->dev = dev; - wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes; + ar->vif_max = 1; - wdev->wiphy->max_remain_on_channel_duration = 5000; + ar->max_norm_iface = 1; + + spin_lock_init(&ar->lock); + spin_lock_init(&ar->mcastpsq_lock); + spin_lock_init(&ar->list_lock); + + init_waitqueue_head(&ar->event_wq); + sema_init(&ar->sem, 1); + + INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); + INIT_LIST_HEAD(&ar->vif_list); + + clear_bit(WMI_ENABLED, &ar->flag); + clear_bit(SKIP_SCAN, &ar->flag); + clear_bit(DESTROY_IN_PROGRESS, &ar->flag); + + ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL; + ar->listen_intvl_b = 0; + ar->tx_pwr = 0; + + ar->intra_bss = 1; + ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; + + ar->state = ATH6KL_STATE_OFF; + + memset((u8 *)ar->sta_list, 0, + AP_MAX_NUM_STA * sizeof(struct ath6kl_sta)); + + /* Init the PS queues */ + for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { + spin_lock_init(&ar->sta_list[ctr].psq_lock); + skb_queue_head_init(&ar->sta_list[ctr].psq); + } + + skb_queue_head_init(&ar->mcastpsq); + + memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); + + return ar; +} + +int ath6kl_register_ieee80211_hw(struct ath6kl *ar) +{ + struct wiphy *wiphy = ar->wiphy; + int ret; + + wiphy->mgmt_stypes = ath6kl_mgmt_stypes; + + wiphy->max_remain_on_channel_duration = 5000; /* set device pointer for wiphy */ - set_wiphy_dev(wdev->wiphy, dev); + set_wiphy_dev(wiphy, ar->dev); - wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP); if (ar->p2p) { - wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_CLIENT); + wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_CLIENT); } - /* max num of ssids that can be probed during scanning */ - wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; - wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */ - wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; - wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; - wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - - wdev->wiphy->cipher_suites = cipher_suites; - wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); - ret = wiphy_register(wdev->wiphy); + /* max num of ssids that can be probed during scanning */ + wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; + wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? 
*/ + wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; + wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + wiphy->cipher_suites = cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; + wiphy->wowlan.pattern_min_len = 1; + wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; + + wiphy->max_sched_scan_ssids = 10; + + ret = wiphy_register(wiphy); if (ret < 0) { ath6kl_err("couldn't register wiphy device\n"); - wiphy_free(wdev->wiphy); - kfree(wdev); - return NULL; + return ret; } - return wdev; + return 0; } -void ath6kl_cfg80211_deinit(struct ath6kl *ar) +static int ath6kl_init_if_data(struct ath6kl_vif *vif) { - struct wireless_dev *wdev = ar->wdev; - - if (ar->scan_req) { - cfg80211_scan_done(ar->scan_req, true); - ar->scan_req = NULL; + vif->aggr_cntxt = aggr_init(vif->ndev); + if (!vif->aggr_cntxt) { + ath6kl_err("failed to initialize aggr\n"); + return -ENOMEM; } - if (!wdev) - return; + setup_timer(&vif->disconnect_timer, disconnect_timer_handler, + (unsigned long) vif->ndev); + setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer, + (unsigned long) vif); + + set_bit(WMM_ENABLED, &vif->flags); + spin_lock_init(&vif->if_lock); - wiphy_unregister(wdev->wiphy); - wiphy_free(wdev->wiphy); - kfree(wdev); + return 0; +} + +void ath6kl_deinit_if_data(struct ath6kl_vif *vif) +{ + struct ath6kl *ar = vif->ar; + + aggr_module_destroy(vif->aggr_cntxt); + + ar->avail_idx_map |= BIT(vif->fw_vif_idx); + + if (vif->nw_type == ADHOC_NETWORK) + ar->ibss_if_active = false; + + unregister_netdevice(vif->ndev); + + ar->num_vif--; +} + +struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name, + enum nl80211_iftype type, u8 fw_vif_idx, + u8 nw_type) +{ + struct net_device *ndev; + struct ath6kl_vif *vif; + + ndev = alloc_netdev(sizeof(*vif), name, ether_setup); + if (!ndev) + return NULL; + + vif = netdev_priv(ndev); + ndev->ieee80211_ptr = &vif->wdev; + vif->wdev.wiphy = ar->wiphy; + vif->ar = ar; + vif->ndev = ndev; + SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy)); + vif->wdev.netdev = ndev; + vif->wdev.iftype = type; + vif->fw_vif_idx = fw_vif_idx; + vif->nw_type = vif->next_mode = nw_type; + + memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); + if (fw_vif_idx != 0) + ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) | + 0x2; + + init_netdev(ndev); + + ath6kl_init_control_info(vif); + + /* TODO: Pass interface specific pointer instead of ar */ + if (ath6kl_init_if_data(vif)) + goto err; + + if (register_netdevice(ndev)) + goto err; + + ar->avail_idx_map &= ~BIT(fw_vif_idx); + vif->sme_state = SME_DISCONNECTED; + set_bit(WLAN_ENABLED, &vif->flags); + ar->wlan_pwr_state = WLAN_POWER_STATE_ON; + set_bit(NETDEV_REGISTERED, &vif->flags); + + if (type == NL80211_IFTYPE_ADHOC) + ar->ibss_if_active = true; + + spin_lock_bh(&ar->list_lock); + list_add_tail(&vif->list, &ar->vif_list); + spin_unlock_bh(&ar->list_lock); + + return ndev; + +err: + aggr_module_destroy(vif->aggr_cntxt); + free_netdev(ndev); + return NULL; +} + +void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar) +{ + wiphy_unregister(ar->wiphy); + wiphy_free(ar->wiphy); } diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h index 
a84adc249c6..81f20a57231 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.h +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h @@ -17,23 +17,43 @@ #ifndef ATH6KL_CFG80211_H #define ATH6KL_CFG80211_H -struct wireless_dev *ath6kl_cfg80211_init(struct device *dev); -void ath6kl_cfg80211_deinit(struct ath6kl *ar); +enum ath6kl_cfg_suspend_mode { + ATH6KL_CFG_SUSPEND_DEEPSLEEP, + ATH6KL_CFG_SUSPEND_CUTPOWER, + ATH6KL_CFG_SUSPEND_WOW, + ATH6KL_CFG_SUSPEND_SCHED_SCAN, +}; -void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status); +struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name, + enum nl80211_iftype type, + u8 fw_vif_idx, u8 nw_type); +int ath6kl_register_ieee80211_hw(struct ath6kl *ar); +struct ath6kl *ath6kl_core_alloc(struct device *dev); +void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar); -void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, +void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted); + +void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, u16 listen_intvl, u16 beacon_intvl, enum network_type nw_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info); -void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, +void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 proto_reason); -void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast); +int ath6kl_cfg80211_suspend(struct ath6kl *ar, + enum ath6kl_cfg_suspend_mode mode, + struct cfg80211_wowlan *wow); + +int ath6kl_cfg80211_resume(struct ath6kl *ar); + +void ath6kl_cfg80211_stop(struct ath6kl_vif *vif); +void ath6kl_cfg80211_stop_all(struct ath6kl *ar); + #endif /* ATH6KL_CFG80211_H */ diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h index b92f0e5d233..bfd6597763d 100644 --- a/drivers/net/wireless/ath/ath6kl/common.h +++ b/drivers/net/wireless/ath/ath6kl/common.h @@ -23,8 +23,6 @@ extern int ath6kl_printk(const char *level, const char *fmt, ...); -#define A_CACHE_LINE_PAD 128 - /* * Reflects the version of binary interface exposed by ATH6KL target * firmware. 
Needs to be incremented by 1 for any change in the firmware @@ -73,25 +71,16 @@ enum crypto_type { WEP_CRYPT = 0x02, TKIP_CRYPT = 0x04, AES_CRYPT = 0x08, + WAPI_CRYPT = 0x10, }; struct htc_endpoint_credit_dist; struct ath6kl; enum htc_credit_dist_reason; -struct htc_credit_state_info; +struct ath6kl_htc_credit_info; -int ath6k_setup_credit_dist(void *htc_handle, - struct htc_credit_state_info *cred_info); -void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf, - struct list_head *epdist_list, - enum htc_credit_dist_reason reason); -void ath6k_credit_init(struct htc_credit_state_info *cred_inf, - struct list_head *ep_list, - int tot_credits); -void ath6k_seek_credits(struct htc_credit_state_info *cred_inf, - struct htc_endpoint_credit_dist *ep_dist); struct ath6kl *ath6kl_core_alloc(struct device *sdev); int ath6kl_core_init(struct ath6kl *ar); -int ath6kl_unavail_ev(struct ath6kl *ar); +void ath6kl_core_cleanup(struct ath6kl *ar); struct sk_buff *ath6kl_buf_alloc(int size); #endif /* COMMON_H */ diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 6d8a4845baa..c863a28f2e0 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -70,10 +70,20 @@ enum ath6kl_fw_ie_type { ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5, ATH6KL_FW_IE_CAPABILITIES = 6, ATH6KL_FW_IE_PATCH_ADDR = 7, + ATH6KL_FW_IE_BOARD_ADDR = 8, + ATH6KL_FW_IE_VIF_MAX = 9, }; enum ath6kl_fw_capability { ATH6KL_FW_CAPABILITY_HOST_P2P = 0, + ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1, + + /* + * Firmware is capable of supporting P2P mgmt operations on a + * station interface. After group formation, the station + * interface will become a P2P client/GO interface as the case may be + */ + ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, /* this needs to be last */ ATH6KL_FW_CAPABILITY_MAX, @@ -88,37 +98,47 @@ struct ath6kl_fw_ie { }; /* AR6003 1.0 definitions */ -#define AR6003_REV1_VERSION 0x300002ba +#define AR6003_HW_1_0_VERSION 0x300002ba /* AR6003 2.0 definitions */ -#define AR6003_REV2_VERSION 0x30000384 -#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910 -#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77" -#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77" -#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin" -#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin" -#define AR6003_REV2_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin" -#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin" -#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin" +#define AR6003_HW_2_0_VERSION 0x30000384 +#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910 +#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77" +#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77" +#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin" +#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin" +#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin" +#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin" +#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \ + "ath6k/AR6003/hw2.0/bdata.SD31.bin" /* AR6003 3.0 definitions */ -#define AR6003_REV3_VERSION 0x30000582 -#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin" -#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin" -#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin" -#define AR6003_REV3_PATCH_FILE 
"ath6k/AR6003/hw2.1.1/data.patch.bin" -#define AR6003_REV3_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin" -#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin" -#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \ - "ath6k/AR6003/hw2.1.1/bdata.SD31.bin" +#define AR6003_HW_2_1_1_VERSION 0x30000582 +#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin" +#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin" +#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \ + "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin" +#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin" +#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin" +#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin" +#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \ + "ath6k/AR6003/hw2.1.1/bdata.SD31.bin" /* AR6004 1.0 definitions */ -#define AR6004_REV1_VERSION 0x30000623 -#define AR6004_REV1_FIRMWARE_FILE "ath6k/AR6004/hw6.1/fw.ram.bin" -#define AR6004_REV1_FIRMWARE_2_FILE "ath6k/AR6004/hw6.1/fw-2.bin" -#define AR6004_REV1_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.bin" -#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin" -#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin" +#define AR6004_HW_1_0_VERSION 0x30000623 +#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin" +#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin" +#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin" +#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \ + "ath6k/AR6004/hw1.0/bdata.DB132.bin" + +/* AR6004 1.1 definitions */ +#define AR6004_HW_1_1_VERSION 0x30000001 +#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin" +#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin" +#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin" +#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \ + "ath6k/AR6004/hw1.1/bdata.DB132.bin" /* Per STA data, used in AP mode */ #define STA_PS_AWAKE BIT(0) @@ -166,6 +186,7 @@ struct ath6kl_fw_ie { #define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1) #define ATH6KL_CONF_ENABLE_11N BIT(2) #define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) +#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4) enum wlan_low_pwr_state { WLAN_POWER_STATE_ON, @@ -271,6 +292,8 @@ struct ath6kl_bmi { u32 cmd_credits; bool done_sent; u8 *cmd_buf; + u32 max_data_size; + u32 max_cmd_size; }; struct target_stats { @@ -380,40 +403,42 @@ struct ath6kl_req_key { u8 key_len; }; -/* Flag info */ -#define WMI_ENABLED 0 -#define WMI_READY 1 -#define CONNECTED 2 -#define STATS_UPDATE_PEND 3 -#define CONNECT_PEND 4 -#define WMM_ENABLED 5 -#define NETQ_STOPPED 6 -#define WMI_CTRL_EP_FULL 7 -#define DTIM_EXPIRED 8 -#define DESTROY_IN_PROGRESS 9 -#define NETDEV_REGISTERED 10 -#define SKIP_SCAN 11 -#define WLAN_ENABLED 12 -#define TESTMODE 13 -#define CLEAR_BSSFILTER_ON_BEACON 14 -#define DTIM_PERIOD_AVAIL 15 +enum ath6kl_hif_type { + ATH6KL_HIF_TYPE_SDIO, + ATH6KL_HIF_TYPE_USB, +}; -struct ath6kl { - struct device *dev; - struct net_device *net_dev; - struct ath6kl_bmi bmi; - const struct ath6kl_hif_ops *hif_ops; - struct wmi *wmi; - int tx_pending[ENDPOINT_MAX]; - int total_tx_data_pend; - struct htc_target *htc_target; - void *hif_priv; - spinlock_t lock; - struct semaphore sem; +/* + * Driver's maximum limit, note that some firmwares support only one vif + * and the runtime (current) limit must be checked from ar->vif_max. 
+ */ +#define ATH6KL_VIF_MAX 3 + +/* vif flags info */ +enum ath6kl_vif_state { + CONNECTED, + CONNECT_PEND, + WMM_ENABLED, + NETQ_STOPPED, + DTIM_EXPIRED, + NETDEV_REGISTERED, + CLEAR_BSSFILTER_ON_BEACON, + DTIM_PERIOD_AVAIL, + WLAN_ENABLED, + STATS_UPDATE_PEND, +}; + +struct ath6kl_vif { + struct list_head list; + struct wireless_dev wdev; + struct net_device *ndev; + struct ath6kl *ar; + /* Lock to protect vif specific net_stats and flags */ + spinlock_t if_lock; + u8 fw_vif_idx; + unsigned long flags; int ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; - u8 next_mode; - u8 nw_type; u8 dot11_auth_mode; u8 auth_mode; u8 prwise_crypto; @@ -421,21 +446,91 @@ struct ath6kl { u8 grp_crypto; u8 grp_crypto_len; u8 def_txkey_index; - struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; + u8 next_mode; + u8 nw_type; u8 bssid[ETH_ALEN]; u8 req_bssid[ETH_ALEN]; u16 ch_hint; u16 bss_ch; + struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; + struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; + struct aggr_info *aggr_cntxt; + + struct timer_list disconnect_timer; + struct timer_list sched_scan_timer; + + struct cfg80211_scan_request *scan_req; + enum sme_state sme_state; + int reconnect_flag; + u32 last_roc_id; + u32 last_cancel_roc_id; + u32 send_action_id; + bool probe_req_report; + u16 next_chan; + u16 assoc_bss_beacon_int; + u8 assoc_bss_dtim_period; + struct net_device_stats net_stats; + struct target_stats target_stats; +}; + +#define WOW_LIST_ID 0 +#define WOW_HOST_REQ_DELAY 500 /* ms */ + +#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */ + +/* Flag info */ +enum ath6kl_dev_state { + WMI_ENABLED, + WMI_READY, + WMI_CTRL_EP_FULL, + TESTMODE, + DESTROY_IN_PROGRESS, + SKIP_SCAN, + ROAM_TBL_PEND, + FIRST_BOOT, +}; + +enum ath6kl_state { + ATH6KL_STATE_OFF, + ATH6KL_STATE_ON, + ATH6KL_STATE_DEEPSLEEP, + ATH6KL_STATE_CUTPOWER, + ATH6KL_STATE_WOW, + ATH6KL_STATE_SCHED_SCAN, +}; + +struct ath6kl { + struct device *dev; + struct wiphy *wiphy; + + enum ath6kl_state state; + + struct ath6kl_bmi bmi; + const struct ath6kl_hif_ops *hif_ops; + struct wmi *wmi; + int tx_pending[ENDPOINT_MAX]; + int total_tx_data_pend; + struct htc_target *htc_target; + enum ath6kl_hif_type hif_type; + void *hif_priv; + struct list_head vif_list; + /* Lock to avoid race in vif_list entries among add/del/traverse */ + spinlock_t list_lock; + u8 num_vif; + unsigned int vif_max; + u8 max_norm_iface; + u8 avail_idx_map; + spinlock_t lock; + struct semaphore sem; u16 listen_intvl_b; u16 listen_intvl_t; u8 lrssi_roam_threshold; struct ath6kl_version version; u32 target_type; u8 tx_pwr; - struct net_device_stats net_stats; - struct target_stats target_stats; struct ath6kl_node_mapping node_map[MAX_NODE_NUM]; u8 ibss_ps_enable; + bool ibss_if_active; u8 node_num; u8 next_ep_id; struct ath6kl_cookie *cookie_list; @@ -446,7 +541,7 @@ struct ath6kl { u8 hiac_stream_active_pri; u8 ep2ac_map[ENDPOINT_MAX]; enum htc_endpoint_id ctrl_ep; - struct htc_credit_state_info credit_state_info; + struct ath6kl_htc_credit_info credit_state_info; u32 connect_ctrl_flags; u32 user_key_ctrl; u8 usr_bss_filter; @@ -456,30 +551,37 @@ struct ath6kl { struct sk_buff_head mcastpsq; spinlock_t mcastpsq_lock; u8 intra_bss; - struct aggr_info *aggr_cntxt; struct wmi_ap_mode_stat ap_stats; u8 ap_country_code[3]; struct list_head amsdu_rx_buffer_queue; - struct timer_list disconnect_timer; u8 rx_meta_ver; - struct wireless_dev *wdev; - struct cfg80211_scan_request *scan_req; - struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; - enum sme_state sme_state; 
enum wlan_low_pwr_state wlan_pwr_state; - struct wmi_scan_params_cmd sc_params; + u8 mac_addr[ETH_ALEN]; #define AR_MCAST_FILTER_MAC_ADDR_SIZE 4 struct { void *rx_report; size_t rx_report_len; } tm; - struct { + struct ath6kl_hw { + u32 id; + const char *name; u32 dataset_patch_addr; u32 app_load_addr; u32 app_start_override_addr; u32 board_ext_data_addr; u32 reserved_ram_size; + u32 board_addr; + u32 refclk_hz; + u32 uarttx_pin; + + const char *fw_otp; + const char *fw; + const char *fw_tcmd; + const char *fw_patch; + const char *fw_api2; + const char *fw_board; + const char *fw_default_board; } hw; u16 conf_flags; @@ -487,7 +589,6 @@ struct ath6kl { struct ath6kl_mbox_info mbox_info; struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM]; - int reconnect_flag; unsigned long flag; u8 *fw_board; @@ -508,13 +609,7 @@ struct ath6kl { struct dentry *debugfs_phy; - u32 send_action_id; - bool probe_req_report; - u16 next_chan; - bool p2p; - u16 assoc_bss_beacon_int; - u8 assoc_bss_dtim_period; #ifdef CONFIG_ATH6KL_DEBUG struct { @@ -529,23 +624,19 @@ struct ath6kl { struct { unsigned int invalid_rate; } war_stats; + + u8 *roam_tbl; + unsigned int roam_tbl_len; + + u8 keepalive; + u8 disc_timeout; } debug; #endif /* CONFIG_ATH6KL_DEBUG */ }; -static inline void *ath6kl_priv(struct net_device *dev) +static inline struct ath6kl *ath6kl_priv(struct net_device *dev) { - return wdev_priv(dev->ieee80211_ptr); -} - -static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info - *cred_info, - struct htc_endpoint_credit_dist - *ep_dist, int credits) -{ - ep_dist->credits += credits; - ep_dist->cred_assngd += credits; - cred_info->cur_free_credits -= credits; + return ((struct ath6kl_vif *) netdev_priv(dev))->ar; } static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, @@ -561,7 +652,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, return addr; } -void ath6kl_destroy(struct net_device *dev, unsigned int unregister); int ath6kl_configure_target(struct ath6kl *ar); void ath6kl_detect_error(unsigned long ptr); void disconnect_timer_handler(unsigned long ptr); @@ -579,10 +669,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length); int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value); int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length); int ath6kl_read_fwlogs(struct ath6kl *ar); -void ath6kl_init_profile_info(struct ath6kl *ar); +void ath6kl_init_profile_info(struct ath6kl_vif *vif); void ath6kl_tx_data_cleanup(struct ath6kl *ar); -void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile, - bool get_dbglogs); struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); @@ -598,40 +686,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target, void aggr_module_destroy(struct aggr_info *aggr_info); void aggr_reset_state(struct aggr_info *aggr_info); -struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr); +struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr); struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid); void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver); int ath6kl_control_tx(void *devt, struct sk_buff *skb, enum htc_endpoint_id eid); -void ath6kl_connect_event(struct ath6kl *ar, u16 channel, +void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, u16 listen_int, u16 beacon_int, enum network_type net_type, u8 
beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info); -void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel); -void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, +void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel); +void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, u8 keymgmt, u8 ucipher, u8 auth, u8 assoc_req_len, u8 *assoc_info); -void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, +void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 prot_reason_status); -void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast); +void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast); void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr); -void ath6kl_scan_complete_evt(struct ath6kl *ar, int status); -void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len); +void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status); +void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len); void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active); enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac); -void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid); +void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid); -void ath6kl_dtimexpiry_event(struct ath6kl *ar); -void ath6kl_disconnect(struct ath6kl *ar); -void ath6kl_deep_sleep_enable(struct ath6kl *ar); -void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid); -void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, +void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif); +void ath6kl_disconnect(struct ath6kl_vif *vif); +void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid); +void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no, u8 win_sz); void ath6kl_wakeup_event(void *dev); -void ath6kl_target_failure(struct ath6kl *ar); + +void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, + bool wait_fot_compltn, bool cold_reset); +void ath6kl_init_control_info(struct ath6kl_vif *vif); +void ath6kl_deinit_if_data(struct ath6kl_vif *vif); +void ath6kl_core_free(struct ath6kl *ar); +struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); +void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready); +int ath6kl_init_hw_start(struct ath6kl *ar); +int ath6kl_init_hw_stop(struct ath6kl *ar); +void ath6kl_check_wow_status(struct ath6kl *ar); #endif /* CORE_H */ diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index 7879b531428..eb808b46f94 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev, static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist) { - ath6kl_dbg(ATH6KL_DBG_ANY, + ath6kl_dbg(ATH6KL_DBG_CREDIT, "--- endpoint: %d svc_id: 0x%X ---\n", ep_dist->endpoint, ep_dist->svc_id); - ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n", ep_dist->dist_flags); - ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n", ep_dist->cred_norm); - ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n", ep_dist->cred_min); - ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n", ep_dist->credits); - ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : 
%d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n", ep_dist->cred_assngd); - ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n", ep_dist->seek_cred); - ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n", ep_dist->cred_sz); - ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n", ep_dist->cred_per_msg); - ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n", ep_dist->cred_to_dist); - ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n", - get_queue_depth(&((struct htc_endpoint *) - ep_dist->htc_rsvd)->txq)); - ath6kl_dbg(ATH6KL_DBG_ANY, + ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n", + get_queue_depth(&ep_dist->htc_ep->txq)); + ath6kl_dbg(ATH6KL_DBG_CREDIT, "----------------------------------\n"); } +/* FIXME: move to htc.c */ void dump_cred_dist_stats(struct htc_target *target) { struct htc_endpoint_credit_dist *ep_list; - if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC)) + if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT)) return; list_for_each_entry(ep_list, &target->cred_dist_list, list) dump_cred_dist(ep_list); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n", - target->cred_dist_cntxt, NULL); - ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n", - target->cred_dist_cntxt->total_avail_credits, - target->cred_dist_cntxt->cur_free_credits); + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit distribution total %d free %d\n", + target->credit_info->total_avail_credits, + target->credit_info->cur_free_credits); } static int ath6kl_debugfs_open(struct inode *inode, struct file *file) @@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; - struct target_stats *tgt_stats = &ar->target_stats; + struct ath6kl_vif *vif; + struct target_stats *tgt_stats; char *buf; unsigned int len = 0, buf_len = 1500; int i; long left; ssize_t ret_cnt; + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + tgt_stats = &vif->target_stats; + buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, return -EBUSY; } - set_bit(STATS_UPDATE_PEND, &ar->flag); + set_bit(STATS_UPDATE_PEND, &vif->flags); - if (ath6kl_wmi_get_stats_cmd(ar->wmi)) { + if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) { up(&ar->sem); kfree(buf); return -EIO; @@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, left = wait_event_interruptible_timeout(ar->event_wq, !test_bit(STATS_UPDATE_PEND, - &ar->flag), WMI_TIMEOUT); + &vif->flags), WMI_TIMEOUT); up(&ar->sem); @@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file, len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", "Total Avail Credits: ", - target->cred_dist_cntxt->total_avail_credits); + target->credit_info->total_avail_credits); len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", "Free credits :", - target->cred_dist_cntxt->cur_free_credits); + target->credit_info->cur_free_credits); len += scnprintf(buf + len, buf_len - len, " Epid Flags Cred_norm Cred_min Credits Cred_assngd" @@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file, print_credit_info("%9d", cred_per_msg); print_credit_info("%14d", cred_to_dist); len += scnprintf(buf + len, buf_len - len, "%12d\n", - 
get_queue_depth(&((struct htc_endpoint *) - ep_list->htc_rsvd)->txq)); + get_queue_depth(&ep_list->htc_ep->txq)); } if (len > buf_len) @@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = { .llseek = default_llseek, }; +static unsigned int print_endpoint_stat(struct htc_target *target, char *buf, + unsigned int buf_len, unsigned int len, + int offset, const char *name) +{ + int i; + struct htc_endpoint_stats *ep_st; + u32 *counter; + + len += scnprintf(buf + len, buf_len - len, "%s:", name); + for (i = 0; i < ENDPOINT_MAX; i++) { + ep_st = &target->endpoint[i].ep_st; + counter = ((u32 *) ep_st) + (offset / 4); + len += scnprintf(buf + len, buf_len - len, " %u", *counter); + } + len += scnprintf(buf + len, buf_len - len, "\n"); + + return len; +} + +static ssize_t ath6kl_endpoint_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct htc_target *target = ar->htc_target; + char *buf; + unsigned int buf_len, len = 0; + ssize_t ret_cnt; + + buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) * + (25 + ENDPOINT_MAX * 11); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + +#define EPSTAT(name) \ + len = print_endpoint_stat(target, buf, buf_len, len, \ + offsetof(struct htc_endpoint_stats, name), \ + #name) + EPSTAT(cred_low_indicate); + EPSTAT(tx_issued); + EPSTAT(tx_pkt_bundled); + EPSTAT(tx_bundles); + EPSTAT(tx_dropped); + EPSTAT(tx_cred_rpt); + EPSTAT(cred_rpt_from_rx); + EPSTAT(cred_rpt_from_other); + EPSTAT(cred_rpt_ep0); + EPSTAT(cred_from_rx); + EPSTAT(cred_from_other); + EPSTAT(cred_from_ep0); + EPSTAT(cred_cosumd); + EPSTAT(cred_retnd); + EPSTAT(rx_pkts); + EPSTAT(rx_lkahds); + EPSTAT(rx_bundl); + EPSTAT(rx_bundle_lkahd); + EPSTAT(rx_bundle_from_hdr); + EPSTAT(rx_alloc_thresh_hit); + EPSTAT(rxalloc_thresh_byte); +#undef EPSTAT + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret_cnt; +} + +static ssize_t ath6kl_endpoint_stats_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct htc_target *target = ar->htc_target; + int ret, i; + u32 val; + struct htc_endpoint_stats *ep_st; + + ret = kstrtou32_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + if (val == 0) { + for (i = 0; i < ENDPOINT_MAX; i++) { + ep_st = &target->endpoint[i].ep_st; + memset(ep_st, 0, sizeof(*ep_st)); + } + } + + return count; +} + +static const struct file_operations fops_endpoint_stats = { + .open = ath6kl_debugfs_open, + .read = ath6kl_endpoint_stats_read, + .write = ath6kl_endpoint_stats_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static unsigned long ath6kl_get_num_reg(void) { int i; @@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = { .llseek = default_llseek, }; +int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, + size_t len) +{ + const struct wmi_target_roam_tbl *tbl; + u16 num_entries; + + if (len < sizeof(*tbl)) + return -EINVAL; + + tbl = (const struct wmi_target_roam_tbl *) buf; + num_entries = le16_to_cpu(tbl->num_entries); + if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) > + len) + return -EINVAL; + + if (ar->debug.roam_tbl == NULL || + ar->debug.roam_tbl_len < (unsigned int) len) { + kfree(ar->debug.roam_tbl); + ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC); + if (ar->debug.roam_tbl == NULL) + 
return -ENOMEM; + } + + memcpy(ar->debug.roam_tbl, buf, len); + ar->debug.roam_tbl_len = len; + + if (test_bit(ROAM_TBL_PEND, &ar->flag)) { + clear_bit(ROAM_TBL_PEND, &ar->flag); + wake_up(&ar->event_wq); + } + + return 0; +} + +static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + long left; + struct wmi_target_roam_tbl *tbl; + u16 num_entries, i; + char *buf; + unsigned int len, buf_len; + ssize_t ret_cnt; + + if (down_interruptible(&ar->sem)) + return -EBUSY; + + set_bit(ROAM_TBL_PEND, &ar->flag); + + ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi); + if (ret) { + up(&ar->sem); + return ret; + } + + left = wait_event_interruptible_timeout( + ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT); + up(&ar->sem); + + if (left <= 0) + return -ETIMEDOUT; + + if (ar->debug.roam_tbl == NULL) + return -ENOMEM; + + tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; + num_entries = le16_to_cpu(tbl->num_entries); + + buf_len = 100 + num_entries * 100; + buf = kzalloc(buf_len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + len = 0; + len += scnprintf(buf + len, buf_len - len, + "roam_mode=%u\n\n" + "# roam_util bssid rssi rssidt last_rssi util bias\n", + le16_to_cpu(tbl->roam_mode)); + + for (i = 0; i < num_entries; i++) { + struct wmi_bss_roam_info *info = &tbl->info[i]; + len += scnprintf(buf + len, buf_len - len, + "%d %pM %d %d %d %d %d\n", + a_sle32_to_cpu(info->roam_util), info->bssid, + info->rssi, info->rssidt, info->last_rssi, + info->util, info->bias); + } + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_roam_table = { + .read = ath6kl_roam_table_read, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_force_roam_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + char buf[20]; + size_t len; + u8 bssid[ETH_ALEN]; + int i; + int addr[ETH_ALEN]; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x", + &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) + != ETH_ALEN) + return -EINVAL; + for (i = 0; i < ETH_ALEN; i++) + bssid[i] = addr[i]; + + ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_force_roam = { + .write = ath6kl_force_roam_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_roam_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + char buf[20]; + size_t len; + enum wmi_roam_mode mode; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + if (len > 0 && buf[len - 1] == '\n') + buf[len - 1] = '\0'; + + if (strcasecmp(buf, "default") == 0) + mode = WMI_DEFAULT_ROAM_MODE; + else if (strcasecmp(buf, "bssbias") == 0) + mode = WMI_HOST_BIAS_ROAM_MODE; + else if (strcasecmp(buf, "lock") == 0) + mode = WMI_LOCK_BSS_MODE; + else + return -EINVAL; + + ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode); + if (ret) + return ret; + + return 
count; +} + +static const struct file_operations fops_roam_mode = { + .write = ath6kl_roam_mode_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) +{ + ar->debug.keepalive = keepalive; +} + +static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[16]; + int len; + + len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_keepalive_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + u8 val; + + ret = kstrtou8_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + + ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_keepalive = { + .open = ath6kl_debugfs_open, + .read = ath6kl_keepalive_read, + .write = ath6kl_keepalive_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout) +{ + ar->debug.disc_timeout = timeout; +} + +static ssize_t ath6kl_disconnect_timeout_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[16]; + int len; + + len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_disconnect_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + u8 val; + + ret = kstrtou8_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + + ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_disconnect_timeout = { + .open = ath6kl_debugfs_open, + .read = ath6kl_disconnect_timeout_read, + .write = ath6kl_disconnect_timeout_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_create_qos_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + char buf[200]; + ssize_t len; + char *sptr, *token; + struct wmi_create_pstream_cmd pstream; + u32 val32; + u16 val16; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.user_pri)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.traffic_direc)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.traffic_class)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.traffic_type)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.voice_psc_cap)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return 
-EINVAL; + pstream.min_service_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.max_service_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.inactivity_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.suspension_int = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.service_start_time = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &pstream.tsid)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &val16)) + return -EINVAL; + pstream.nominal_msdu = cpu_to_le16(val16); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &val16)) + return -EINVAL; + pstream.max_msdu = cpu_to_le16(val16); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.min_data_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.mean_data_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.peak_data_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.max_burst_size = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.delay_bound = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.min_phy_rate = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.sba = cpu_to_le32(val32); + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &val32)) + return -EINVAL; + pstream.medium_time = cpu_to_le32(val32); + + ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream); + + return count; +} + +static const struct file_operations fops_create_qos = { + .write = ath6kl_create_qos_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_delete_qos_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct ath6kl *ar = file->private_data; + struct ath6kl_vif *vif; + char buf[100]; + ssize_t len; + char *sptr, *token; + u8 traffic_class; + u8 tsid; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &traffic_class)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou8(token, 0, &tsid)) + return -EINVAL; + + ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx, + traffic_class, tsid); + + return count; +} + +static 
const struct file_operations fops_delete_qos = { + .write = ath6kl_delete_qos_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_bgscan_int_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u16 bgscan_int; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtou16(buf, 0, &bgscan_int)) + return -EINVAL; + + if (bgscan_int == 0) + bgscan_int = 0xffff; + + ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3, + 0, 0, 0); + + return count; +} + +static const struct file_operations fops_bgscan_int = { + .write = ath6kl_bgscan_int_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_listen_int_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u16 listen_int_t, listen_int_b; + char buf[32]; + char *sptr, *token; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + + if (kstrtou16(token, 0, &listen_int_t)) + return -EINVAL; + + if (kstrtou16(sptr, 0, &listen_int_b)) + return -EINVAL; + + if ((listen_int_t < 15) || (listen_int_t > 5000)) + return -EINVAL; + + if ((listen_int_b < 1) || (listen_int_b > 50)) + return -EINVAL; + + ar->listen_intvl_t = listen_int_t; + ar->listen_intvl_b = listen_int_b; + + ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t, + ar->listen_intvl_b); + + return count; +} + +static ssize_t ath6kl_listen_int_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[32]; + int len; + + len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t, + ar->listen_intvl_b); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_listen_int = { + .read = ath6kl_listen_int_read, + .write = ath6kl_listen_int_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_power_params_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[100]; + unsigned int len = 0; + char *sptr, *token; + u16 idle_period, ps_poll_num, dtim, + tx_wakeup, num_tx; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &idle_period)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &ps_poll_num)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &dtim)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &tx_wakeup)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou16(token, 0, &num_tx)) + return -EINVAL; + + ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num, + dtim, tx_wakeup, num_tx, 0); + + return count; +} + +static const struct file_operations 
fops_power_params = { + .write = ath6kl_power_params_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath6kl_debug_init(struct ath6kl *ar) { ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE); @@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar) ar->debug.fwlog_mask = 0; ar->debugfs_phy = debugfs_create_dir("ath6kl", - ar->wdev->wiphy->debugfsdir); + ar->wiphy->debugfsdir); if (!ar->debugfs_phy) { vfree(ar->debug.fwlog_buf.buf); kfree(ar->debug.fwlog_tmp); @@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar) debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar, &fops_credit_dist_stats); + debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR, + ar->debugfs_phy, ar, &fops_endpoint_stats); + debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar, &fops_fwlog); @@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar) debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar, &fops_war_stats); + debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar, + &fops_roam_table); + + debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar, + &fops_force_roam); + + debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar, + &fops_roam_mode); + + debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, + &fops_keepalive); + + debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR, + ar->debugfs_phy, ar, &fops_disconnect_timeout); + + debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar, + &fops_create_qos); + + debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar, + &fops_delete_qos); + + debugfs_create_file("bgscan_interval", S_IWUSR, + ar->debugfs_phy, ar, &fops_bgscan_int); + + debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar, + &fops_power_params); + return 0; } @@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar) { vfree(ar->debug.fwlog_buf.buf); kfree(ar->debug.fwlog_tmp); + kfree(ar->debug.roam_tbl); } #endif diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h index 7b7675f70a1..9853c9c125c 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.h +++ b/drivers/net/wireless/ath/ath6kl/debug.h @@ -17,19 +17,19 @@ #ifndef DEBUG_H #define DEBUG_H -#include "htc_hif.h" +#include "hif.h" enum ATH6K_DEBUG_MASK { - ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */ - ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */ + ATH6KL_DBG_CREDIT = BIT(0), + /* hole */ ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */ ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */ ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */ - ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */ - ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */ + ATH6KL_DBG_HTC = BIT(5), + ATH6KL_DBG_HIF = BIT(6), ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */ - ATH6KL_DBG_PM = BIT(8), /* power management */ - ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */ + /* hole */ + /* hole */ ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */ ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */ ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */ @@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK { ATH6KL_DBG_SDIO_DUMP = BIT(17), ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */ ATH6KL_DBG_WMI_DUMP = BIT(19), + ATH6KL_DBG_SUSPEND = BIT(20), ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ }; @@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev, void dump_cred_dist_stats(struct htc_target *target); void ath6kl_debug_fwlog_event(struct ath6kl 
*ar, const void *buf, size_t len); void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war); +int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, + size_t len); +void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive); +void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout); int ath6kl_debug_init(struct ath6kl *ar); void ath6kl_debug_cleanup(struct ath6kl *ar); @@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) { } +static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, + const void *buf, size_t len) +{ + return 0; +} + +static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) +{ +} + +static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, + u8 timeout) +{ +} + static inline int ath6kl_debug_init(struct ath6kl *ar) { return 0; diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h index d6c898f3d0b..2fe1dadfc77 100644 --- a/drivers/net/wireless/ath/ath6kl/hif-ops.h +++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h @@ -18,10 +18,16 @@ #define HIF_OPS_H #include "hif.h" +#include "debug.h" static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, u32 len, u32 request) { + ath6kl_dbg(ATH6KL_DBG_HIF, + "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n", + (request & HIF_WRITE) ? "write" : "read", + addr, buf, len, request); + return ar->hif_ops->read_write_sync(ar, addr, buf, len, request); } @@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer, u32 length, u32 request, struct htc_packet *packet) { + ath6kl_dbg(ATH6KL_DBG_HIF, + "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n", + address, buffer, length, request); + return ar->hif_ops->write_async(ar, address, buffer, length, request, packet); } static inline void ath6kl_hif_irq_enable(struct ath6kl *ar) { + ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n"); + return ar->hif_ops->irq_enable(ar); } static inline void ath6kl_hif_irq_disable(struct ath6kl *ar) { + ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n"); + return ar->hif_ops->irq_disable(ar); } @@ -69,9 +83,70 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar) return ar->hif_ops->cleanup_scatter(ar); } -static inline int ath6kl_hif_suspend(struct ath6kl *ar) +static inline int ath6kl_hif_suspend(struct ath6kl *ar, + struct cfg80211_wowlan *wow) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n"); + + return ar->hif_ops->suspend(ar, wow); +} + +/* + * Read from the ATH6KL through its diagnostic window. No cooperation from + * the Target is required for this. + */ +static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address, + u32 *value) { - return ar->hif_ops->suspend(ar); + return ar->hif_ops->diag_read32(ar, address, value); +} + +/* + * Write to the ATH6KL through its diagnostic window. No cooperation from + * the Target is required for this. 
+ */ +static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address, + __le32 value) +{ + return ar->hif_ops->diag_write32(ar, address, value); +} + +static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) +{ + return ar->hif_ops->bmi_read(ar, buf, len); +} + +static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) +{ + return ar->hif_ops->bmi_write(ar, buf, len); +} + +static inline int ath6kl_hif_resume(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n"); + + return ar->hif_ops->resume(ar); +} + +static inline int ath6kl_hif_power_on(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n"); + + return ar->hif_ops->power_on(ar); +} + +static inline int ath6kl_hif_power_off(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n"); + + return ar->hif_ops->power_off(ar); +} + +static inline void ath6kl_hif_stop(struct ath6kl *ar) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n"); + + ar->hif_ops->stop(ar); } #endif diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/hif.c index 86b1cc7409c..e57da35e59f 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_hif.c +++ b/drivers/net/wireless/ath/ath6kl/hif.c @@ -13,18 +13,19 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include "hif.h" #include "core.h" #include "target.h" #include "hif-ops.h" -#include "htc_hif.h" #include "debug.h" #define MAILBOX_FOR_BLOCK_SIZE 1 #define ATH6KL_TIME_QUANTUM 10 /* in ms */ -static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) +static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req, + bool from_dma) { u8 *buf; int i; @@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) return 0; } -int ath6kldev_rw_comp_handler(void *context, int status) +int ath6kl_hif_rw_comp_handler(void *context, int status) { struct htc_packet *packet = context; - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n", + ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n", packet, status); packet->status = status; @@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status) return 0; } +#define REG_DUMP_COUNT_AR6003 60 +#define REGISTER_DUMP_LEN_MAX 60 -static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev) +static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar) { - u32 dummy; - int status; + __le32 regdump_val[REGISTER_DUMP_LEN_MAX]; + u32 i, address, regdump_addr = 0; + int ret; + + if (ar->target_type != TARGET_TYPE_AR6003) + return; + + /* the reg dump pointer is copied to the host interest area */ + address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); + address = TARG_VTOP(ar->target_type, address); + + /* read RAM location through diagnostic window */ + ret = ath6kl_diag_read32(ar, address, &regdump_addr); + + if (ret || !regdump_addr) { + ath6kl_warn("failed to get ptr to register dump area: %d\n", + ret); + return; + } + + ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n", + regdump_addr); + regdump_addr = TARG_VTOP(ar->target_type, regdump_addr); + + /* fetch register dump data */ + ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0], + REG_DUMP_COUNT_AR6003 * (sizeof(u32))); + if (ret) { + ath6kl_warn("failed to get register dump: %d\n", ret); + return; + } + + ath6kl_info("crash dump:\n"); + ath6kl_info("hw 0x%x fw %s\n", 
ar->wiphy->hw_version, + ar->wiphy->fw_version); + + BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4); - ath6kl_err("target debug interrupt\n"); + for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) { + ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n", + 4 * i, + le32_to_cpu(regdump_val[i]), + le32_to_cpu(regdump_val[i + 1]), + le32_to_cpu(regdump_val[i + 2]), + le32_to_cpu(regdump_val[i + 3])); + } + +} + +static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) +{ + u32 dummy; + int ret; - ath6kl_target_failure(dev->ar); + ath6kl_warn("firmware crashed\n"); /* * read counter to clear the interrupt, the debug error interrupt is * counter 0. */ - status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, + ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); - if (status) - WARN_ON(1); + if (ret) + ath6kl_warn("Failed to clear debug interrupt: %d\n", ret); - return status; + ath6kl_hif_dump_fw_crash(dev->ar); + + return ret; } /* mailbox recv message polling */ -int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, +int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, int timeout) { struct ath6kl_irq_proc_registers *rg; @@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, /* delay a little */ mdelay(ATH6KL_TIME_QUANTUM); - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i); + ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i); } if (i == 0) { @@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, * Target failure handler will be called in case of * an assert. */ - ath6kldev_proc_dbg_intr(dev); + ath6kl_hif_proc_dbg_intr(dev); } return status; @@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, * Disable packet reception (used in case the host runs out of buffers) * using the interrupt enable registers through the host I/F */ -int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) +int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx) { struct ath6kl_irq_enable_reg regs; int status = 0; + ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n", + enable_rx ? "enable" : "disable"); + /* take the lock to protect interrupt enable shadows */ spin_lock_bh(&dev->lock); @@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) return status; } -int ath6kldev_submit_scat_req(struct ath6kl_device *dev, +int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, struct hif_scatter_req *scat_req, bool read) { int status = 0; @@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev, dev->ar->mbox_info.htc_addr; } - ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND), - "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n", + ath6kl_dbg(ATH6KL_DBG_HIF, + "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n", scat_req->scat_entries, scat_req->len, scat_req->addr, !read ? "async" : "sync", (read) ? 
"rd" : "wr"); if (!read && scat_req->virt_scat) { - status = ath6kldev_cp_scat_dma_buf(scat_req, false); + status = ath6kl_hif_cp_scat_dma_buf(scat_req, false); if (status) { scat_req->status = status; scat_req->complete(dev->ar->htc_target, scat_req); @@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev, scat_req->status = status; if (!status && scat_req->virt_scat) scat_req->status = - ath6kldev_cp_scat_dma_buf(scat_req, true); + ath6kl_hif_cp_scat_dma_buf(scat_req, true); } return status; } -static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) +static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev) { u8 counter_int_status; @@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) * the debug assertion counter interrupt. */ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) - return ath6kldev_proc_dbg_intr(dev); + return ath6kl_hif_proc_dbg_intr(dev); return 0; } -static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) +static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev) { int status; u8 error_int_status; @@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) return status; } -static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev) +static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev) { int status; u8 cpu_int_status; @@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) * we rapidly pull packets. */ status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, - &lk_ahd, &fetched); + lk_ahd, &fetched); if (status) goto out; @@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) if (MS(HOST_INT_STATUS_CPU, host_int_status)) { /* CPU Interrupt */ - status = ath6kldev_proc_cpu_intr(dev); + status = ath6kl_hif_proc_cpu_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_ERROR, host_int_status)) { /* Error Interrupt */ - status = ath6kldev_proc_err_intr(dev); + status = ath6kl_hif_proc_err_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_COUNTER, host_int_status)) /* Counter Interrupt */ - status = ath6kldev_proc_counter_intr(dev); + status = ath6kl_hif_proc_counter_intr(dev); out: /* @@ -479,9 +535,10 @@ out: } /* interrupt handler, kicks off all interrupt processing */ -int ath6kldev_intr_bh_handler(struct ath6kl *ar) +int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) { struct ath6kl_device *dev = ar->htc_target->dev; + unsigned long timeout; int status = 0; bool done = false; @@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar) * IRQ processing is synchronous, interrupt status registers can be * re-read. 
*/ - while (!done) { + timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT); + while (time_before(jiffies, timeout) && !done) { status = proc_pending_irqs(dev, &done); if (status) break; @@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar) return status; } -static int ath6kldev_enable_intrs(struct ath6kl_device *dev) +static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; int status; @@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev) return status; } -int ath6kldev_disable_intrs(struct ath6kl_device *dev) +int ath6kl_hif_disable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; @@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev) } /* enable device interrupts */ -int ath6kldev_unmask_intrs(struct ath6kl_device *dev) +int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev) { int status = 0; @@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev) * target "soft" resets. The ATH6KL interrupt enables reset back to an * "enabled" state when this happens. */ - ath6kldev_disable_intrs(dev); + ath6kl_hif_disable_intrs(dev); /* unmask the host controller interrupts */ ath6kl_hif_irq_enable(dev->ar); - status = ath6kldev_enable_intrs(dev); + status = ath6kl_hif_enable_intrs(dev); return status; } /* disable all device interrupts */ -int ath6kldev_mask_intrs(struct ath6kl_device *dev) +int ath6kl_hif_mask_intrs(struct ath6kl_device *dev) { /* * Mask the interrupt at the HIF layer to avoid any stray interrupt * taken while we zero out our shadow registers in - * ath6kldev_disable_intrs(). + * ath6kl_hif_disable_intrs(). */ ath6kl_hif_irq_disable(dev->ar); - return ath6kldev_disable_intrs(dev); + return ath6kl_hif_disable_intrs(dev); } -int ath6kldev_setup(struct ath6kl_device *dev) +int ath6kl_hif_setup(struct ath6kl_device *dev) { int status = 0; @@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev) /* must be a power of 2 */ if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { WARN_ON(1); + status = -EINVAL; goto fail_setup; } /* assemble mask, used for padding to a block */ dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; - ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n", + ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n", dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); - ath6kl_dbg(ATH6KL_DBG_TRC, - "hif interrupt processing is sync only\n"); - - status = ath6kldev_disable_intrs(dev); + status = ath6kl_hif_disable_intrs(dev); fail_setup: return status; diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h index 797e2d1d9bf..699a036f3a4 100644 --- a/drivers/net/wireless/ath/ath6kl/hif.h +++ b/drivers/net/wireless/ath/ath6kl/hif.h @@ -35,6 +35,7 @@ #define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024) #define MANUFACTURER_ID_AR6003_BASE 0x300 +#define MANUFACTURER_ID_AR6004_BASE 0x400 /* SDIO manufacturer ID and Codes */ #define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00 #define MANUFACTURER_CODE 0x271 /* Atheros */ @@ -59,6 +60,18 @@ /* mode to enable special 4-bit interrupt assertion without clock */ #define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) +/* HTC runs over mailbox 0 */ +#define HTC_MAILBOX 0 + +#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01 + +/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? 
*/ +#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16 +#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024) +#define ATH6KL_SCATTER_REQS 4 + +#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000 + struct bus_request { struct list_head list; @@ -186,6 +199,34 @@ struct hif_scatter_req { struct hif_scatter_item scat_list[1]; }; +struct ath6kl_irq_proc_registers { + u8 host_int_status; + u8 cpu_int_status; + u8 error_int_status; + u8 counter_int_status; + u8 mbox_frame; + u8 rx_lkahd_valid; + u8 host_int_status2; + u8 gmbox_rx_avail; + __le32 rx_lkahd[2]; + __le32 rx_gmbox_lkahd_alias[2]; +} __packed; + +struct ath6kl_irq_enable_reg { + u8 int_status_en; + u8 cpu_int_status_en; + u8 err_int_status_en; + u8 cntr_int_status_en; +} __packed; + +struct ath6kl_device { + spinlock_t lock; + struct ath6kl_irq_proc_registers irq_proc_reg; + struct ath6kl_irq_enable_reg irq_en_reg; + struct htc_target *htc_cnxt; + struct ath6kl *ar; +}; + struct ath6kl_hif_ops { int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf, u32 len, u32 request); @@ -202,7 +243,30 @@ struct ath6kl_hif_ops { int (*scat_req_rw) (struct ath6kl *ar, struct hif_scatter_req *scat_req); void (*cleanup_scatter)(struct ath6kl *ar); - int (*suspend)(struct ath6kl *ar); + int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow); + int (*resume)(struct ath6kl *ar); + int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value); + int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value); + int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len); + int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len); + int (*power_on)(struct ath6kl *ar); + int (*power_off)(struct ath6kl *ar); + void (*stop)(struct ath6kl *ar); }; +int ath6kl_hif_setup(struct ath6kl_device *dev); +int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev); +int ath6kl_hif_mask_intrs(struct ath6kl_device *dev); +int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, + u32 *lk_ahd, int timeout); +int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx); +int ath6kl_hif_disable_intrs(struct ath6kl_device *dev); + +int ath6kl_hif_rw_comp_handler(void *context, int status); +int ath6kl_hif_intr_bh_handler(struct ath6kl *ar); + +/* Scatter Function and Definitions */ +int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, + struct hif_scatter_req *scat_req, bool read); + #endif diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c index f88a7c9e414..f3b63ca25c7 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.c +++ b/drivers/net/wireless/ath/ath6kl/htc.c @@ -15,13 +15,321 @@ */ #include "core.h" -#include "htc_hif.h" +#include "hif.h" #include "debug.h" #include "hif-ops.h" #include <asm/unaligned.h> #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) +/* Functions for Tx credit handling */ +static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist, + int credits) +{ + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n", + ep_dist->endpoint, credits); + + ep_dist->credits += credits; + ep_dist->cred_assngd += credits; + cred_info->cur_free_credits -= credits; +} + +static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info, + struct list_head *ep_list, + int tot_credits) +{ + struct htc_endpoint_credit_dist *cur_ep_dist; + int count; + + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits); + + cred_info->cur_free_credits = tot_credits; + cred_info->total_avail_credits = tot_credits; + + 
list_for_each_entry(cur_ep_dist, ep_list, list) { + if (cur_ep_dist->endpoint == ENDPOINT_0) + continue; + + cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; + + if (tot_credits > 4) { + if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || + (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { + ath6kl_credit_deposit(cred_info, + cur_ep_dist, + cur_ep_dist->cred_min); + cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; + } + } + + if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { + ath6kl_credit_deposit(cred_info, cur_ep_dist, + cur_ep_dist->cred_min); + /* + * Control service is always marked active, it + * never goes inactive EVER. + */ + cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; + } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC) + /* this is the lowest priority data endpoint */ + /* FIXME: this looks fishy, check */ + cred_info->lowestpri_ep_dist = cur_ep_dist->list; + + /* + * Streams have to be created (explicit | implicit) for all + * kinds of traffic. BE endpoints are also inactive in the + * beginning. When BE traffic starts it creates implicit + * streams that redistributes credits. + * + * Note: all other endpoints have minimums set but are + * initially given NO credits. credits will be distributed + * as traffic activity demands + */ + } + + WARN_ON(cred_info->cur_free_credits <= 0); + + list_for_each_entry(cur_ep_dist, ep_list, list) { + if (cur_ep_dist->endpoint == ENDPOINT_0) + continue; + + if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) + cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; + else { + /* + * For the remaining data endpoints, we assume that + * each cred_per_msg are the same. We use a simple + * calculation here, we take the remaining credits + * and determine how many max messages this can + * cover and then set each endpoint's normal value + * equal to 3/4 this amount. 
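/*
 * Worked example with made-up numbers (not from the patch): with 26 free
 * credits and cred_per_msg = 4,
 *
 *	count = (26 / 4) * 4	= 24	(round down to whole messages)
 *	count = (24 * 3) >> 2	= 18	(keep 3/4 of that)
 *	count = max(18, 4)	= 18	(never below one full message)
 *
 * so this endpoint's cred_norm would end up at 18.
 */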
+ */ + count = (cred_info->cur_free_credits / + cur_ep_dist->cred_per_msg) + * cur_ep_dist->cred_per_msg; + count = (count * 3) >> 2; + count = max(count, cur_ep_dist->cred_per_msg); + cur_ep_dist->cred_norm = count; + + } + + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n", + cur_ep_dist->endpoint, + cur_ep_dist->svc_id, + cur_ep_dist->credits, + cur_ep_dist->cred_per_msg, + cur_ep_dist->cred_norm, + cur_ep_dist->cred_min); + } +} + +/* initialize and setup credit distribution */ +int ath6kl_credit_setup(void *htc_handle, + struct ath6kl_htc_credit_info *cred_info) +{ + u16 servicepriority[5]; + + memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info)); + + servicepriority[0] = WMI_CONTROL_SVC; /* highest */ + servicepriority[1] = WMI_DATA_VO_SVC; + servicepriority[2] = WMI_DATA_VI_SVC; + servicepriority[3] = WMI_DATA_BE_SVC; + servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ + + /* set priority list */ + ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); + + return 0; +} + +/* reduce an ep's credits back to a set limit */ +static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist, + int limit) +{ + int credits; + + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n", + ep_dist->endpoint, limit); + + ep_dist->cred_assngd = limit; + + if (ep_dist->credits <= limit) + return; + + credits = ep_dist->credits - limit; + ep_dist->credits -= credits; + cred_info->cur_free_credits += credits; +} + +static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info, + struct list_head *epdist_list) +{ + struct htc_endpoint_credit_dist *cur_dist_list; + + list_for_each_entry(cur_dist_list, epdist_list, list) { + if (cur_dist_list->endpoint == ENDPOINT_0) + continue; + + if (cur_dist_list->cred_to_dist > 0) { + cur_dist_list->credits += + cur_dist_list->cred_to_dist; + cur_dist_list->cred_to_dist = 0; + if (cur_dist_list->credits > + cur_dist_list->cred_assngd) + ath6kl_credit_reduce(cred_info, + cur_dist_list, + cur_dist_list->cred_assngd); + + if (cur_dist_list->credits > + cur_dist_list->cred_norm) + ath6kl_credit_reduce(cred_info, cur_dist_list, + cur_dist_list->cred_norm); + + if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) { + if (cur_dist_list->txq_depth == 0) + ath6kl_credit_reduce(cred_info, + cur_dist_list, 0); + } + } + } +} + +/* + * HTC has an endpoint that needs credits, ep_dist is the endpoint in + * question. + */ +static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist) +{ + struct htc_endpoint_credit_dist *curdist_list; + int credits = 0; + int need; + + if (ep_dist->svc_id == WMI_CONTROL_SVC) + goto out; + + if ((ep_dist->svc_id == WMI_DATA_VI_SVC) || + (ep_dist->svc_id == WMI_DATA_VO_SVC)) + if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) + goto out; + + /* + * For all other services, we follow a simple algorithm of: + * + * 1. checking the free pool for credits + * 2. checking lower priority endpoints for credits to take + */ + + credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); + + if (credits >= ep_dist->seek_cred) + goto out; + + /* + * We don't have enough in the free pool, try taking away from + * lower priority services The rule for taking away credits: + * + * 1. Only take from lower priority endpoints + * 2. Only take what is allocated above the minimum (never + * starve an endpoint completely) + * 3. Only take what you need. 
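/*
 * Illustrative sketch only, not part of the patch: the take-away rule from
 * the list above boiled down to a single check. A lower-priority endpoint
 * is raided only if handing over the whole remaining shortfall still leaves
 * it at or above its configured minimum; otherwise nothing is taken from
 * it. All names and numbers below are invented for the example.
 */
#include <stdio.h>

static int creds_takeable(int assigned, int min, int need)
{
	return (assigned - need >= min) ? need : 0;
}

int main(void)
{
	/* assigned 10, minimum 4: a shortfall of 5 can be covered in full */
	printf("%d\n", creds_takeable(10, 4, 5));	/* 5 */
	/* a shortfall of 7 would push the victim below its minimum */
	printf("%d\n", creds_takeable(10, 4, 7));	/* 0 */
	return 0;
}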
+ */ + + list_for_each_entry_reverse(curdist_list, + &cred_info->lowestpri_ep_dist, + list) { + if (curdist_list == ep_dist) + break; + + need = ep_dist->seek_cred - cred_info->cur_free_credits; + + if ((curdist_list->cred_assngd - need) >= + curdist_list->cred_min) { + /* + * The current one has been allocated more than + * it's minimum and it has enough credits assigned + * above it's minimum to fulfill our need try to + * take away just enough to fulfill our need. + */ + ath6kl_credit_reduce(cred_info, curdist_list, + curdist_list->cred_assngd - need); + + if (cred_info->cur_free_credits >= + ep_dist->seek_cred) + break; + } + + if (curdist_list->endpoint == ENDPOINT_0) + break; + } + + credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); + +out: + /* did we find some credits? */ + if (credits) + ath6kl_credit_deposit(cred_info, ep_dist, credits); + + ep_dist->seek_cred = 0; +} + +/* redistribute credits based on activity change */ +static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info, + struct list_head *ep_dist_list) +{ + struct htc_endpoint_credit_dist *curdist_list; + + list_for_each_entry(curdist_list, ep_dist_list, list) { + if (curdist_list->endpoint == ENDPOINT_0) + continue; + + if ((curdist_list->svc_id == WMI_DATA_BK_SVC) || + (curdist_list->svc_id == WMI_DATA_BE_SVC)) + curdist_list->dist_flags |= HTC_EP_ACTIVE; + + if ((curdist_list->svc_id != WMI_CONTROL_SVC) && + !(curdist_list->dist_flags & HTC_EP_ACTIVE)) { + if (curdist_list->txq_depth == 0) + ath6kl_credit_reduce(info, curdist_list, 0); + else + ath6kl_credit_reduce(info, + curdist_list, + curdist_list->cred_min); + } + } +} + +/* + * + * This function is invoked whenever endpoints require credit + * distributions. A lock is held while this function is invoked, this + * function shall NOT block. The ep_dist_list is a list of distribution + * structures in prioritized order as defined by the call to the + * htc_set_credit_dist() api. 
+ */ +static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info, + struct list_head *ep_dist_list, + enum htc_credit_dist_reason reason) +{ + switch (reason) { + case HTC_CREDIT_DIST_SEND_COMPLETE: + ath6kl_credit_update(cred_info, ep_dist_list); + break; + case HTC_CREDIT_DIST_ACTIVITY_CHANGE: + ath6kl_credit_redistribute(cred_info, ep_dist_list); + break; + default: + break; + } + + WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits); + WARN_ON(cred_info->cur_free_credits < 0); +} + static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len) { u8 *align_addr; @@ -102,12 +410,12 @@ static void htc_tx_comp_update(struct htc_target *target, packet->info.tx.cred_used; endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", - target->cred_dist_cntxt, &target->cred_dist_list); + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n", + target->credit_info, &target->cred_dist_list); - ath6k_credit_distribute(target->cred_dist_cntxt, - &target->cred_dist_list, - HTC_CREDIT_DIST_SEND_COMPLETE); + ath6kl_credit_distribute(target->credit_info, + &target->cred_dist_list, + HTC_CREDIT_DIST_SEND_COMPLETE); spin_unlock_bh(&target->tx_lock); } @@ -118,8 +426,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint, if (list_empty(txq)) return; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "send complete ep %d, (%d pkts)\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx complete ep %d pkts %d\n", endpoint->eid, get_queue_depth(txq)); ath6kl_tx_complete(endpoint->target->dev->ar, txq); @@ -131,6 +439,9 @@ static void htc_tx_comp_handler(struct htc_target *target, struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint]; struct list_head container; + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n", + packet->info.tx.seqno); + htc_tx_comp_update(target, endpoint, packet); INIT_LIST_HEAD(&container); list_add_tail(&packet->list, &container); @@ -148,8 +459,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target, INIT_LIST_HEAD(&tx_compq); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "htc_async_tx_scat_complete total len: %d entries: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx scat complete len %d entries %d\n", scat_req->len, scat_req->scat_entries); if (scat_req->status) @@ -190,16 +501,13 @@ static int ath6kl_htc_tx_issue(struct htc_target *target, send_len = packet->act_len + HTC_HDR_LENGTH; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n", - __func__, send_len, sync ? "sync" : "async"); - padded_len = CALC_TXRX_PADDED_LEN(target, send_len); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n", - padded_len, - target->dev->ar->mbox_info.htc_addr, - sync ? "sync" : "async"); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n", + send_len, packet->info.tx.seqno, padded_len, + target->dev->ar->mbox_info.htc_addr, + sync ? "sync" : "async"); if (sync) { status = hif_read_write_sync(target->dev->ar, @@ -227,7 +535,7 @@ static int htc_check_credits(struct htc_target *target, *req_cred = (len > target->tgt_cred_sz) ? 
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n", *req_cred, ep->cred_dist.credits); if (ep->cred_dist.credits < *req_cred) { @@ -237,16 +545,13 @@ static int htc_check_credits(struct htc_target *target, /* Seek more credits */ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", - target->cred_dist_cntxt, &ep->cred_dist); - - ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist); + ath6kl_credit_seek(target->credit_info, &ep->cred_dist); ep->cred_dist.seek_cred = 0; if (ep->cred_dist.credits < *req_cred) { - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "not enough credits for ep %d - leaving packet in queue\n", + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit not found for ep %d\n", eid); return -EINVAL; } @@ -260,17 +565,15 @@ static int htc_check_credits(struct htc_target *target, ep->cred_dist.seek_cred = ep->cred_dist.cred_per_msg - ep->cred_dist.credits; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", - target->cred_dist_cntxt, &ep->cred_dist); - - ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist); + ath6kl_credit_seek(target->credit_info, &ep->cred_dist); /* see if we were successful in getting more */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { /* tell the target we need credits ASAP! */ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; ep->ep_st.cred_low_indicate += 1; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n"); + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit we need credits asap\n"); } } @@ -295,8 +598,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target, packet = list_first_entry(&endpoint->txq, struct htc_packet, list); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "got head pkt:0x%p , queue depth: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx got packet 0x%p queue depth %d\n", packet, get_queue_depth(&endpoint->txq)); len = CALC_TXRX_PADDED_LEN(target, @@ -404,9 +707,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target, scat_req->len += len; scat_req->scat_entries++; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n", - i, packet, len, rem_scat); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n", + i, packet, packet->info.tx.seqno, len, rem_scat); } /* Roll back scatter setup in case of any failure */ @@ -455,12 +758,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, if (!scat_req) { /* no scatter resources */ - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "no more scatter resources\n"); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx no more scatter resources\n"); break; } - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n", n_scat); scat_req->len = 0; @@ -479,10 +782,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, n_sent_bundle++; tot_pkts_bundle += scat_req->scat_entries; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "send scatter total bytes: %d , entries: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx scatter bytes %d entries %d\n", scat_req->len, scat_req->scat_entries); - ath6kldev_submit_scat_req(target->dev, scat_req, false); + ath6kl_hif_submit_scat_req(target->dev, scat_req, false); if (status) break; @@ -490,8 +793,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, *sent_bundle = n_sent_bundle; *n_bundle_pkts = tot_pkts_bundle; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 
"%s (sent:%d)\n", - __func__, n_sent_bundle); + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n", + n_sent_bundle); return; } @@ -510,7 +813,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target, if (endpoint->tx_proc_cnt > 1) { endpoint->tx_proc_cnt--; spin_unlock_bh(&target->tx_lock); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n"); + ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n"); return; } @@ -588,15 +891,12 @@ static bool ath6kl_htc_tx_try(struct htc_target *target, overflow = true; if (overflow) - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n", - endpoint->eid, overflow, txq_depth, + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx overflow ep %d depth %d max %d\n", + endpoint->eid, txq_depth, endpoint->max_txq_depth); if (overflow && ep_cb.tx_full) { - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "indicating overflowed tx packet: 0x%p\n", tx_pkt); - if (ep_cb.tx_full(endpoint->target, tx_pkt) == HTC_SEND_FULL_DROP) { endpoint->ep_st.tx_dropped += 1; @@ -625,12 +925,12 @@ static void htc_chk_ep_txq(struct htc_target *target) * are not modifying any state. */ list_for_each_entry(cred_dist, &target->cred_dist_list, list) { - endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd; + endpoint = cred_dist->htc_ep; spin_lock_bh(&target->tx_lock); if (!list_empty(&endpoint->txq)) { - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "ep %d has %d credits and %d packets in tx queue\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc creds ep %d credits %d pkts %d\n", cred_dist->endpoint, endpoint->cred_dist.credits, get_queue_depth(&endpoint->txq)); @@ -704,13 +1004,13 @@ static int htc_setup_tx_complete(struct htc_target *target) } void ath6kl_htc_set_credit_dist(struct htc_target *target, - struct htc_credit_state_info *cred_dist_cntxt, + struct ath6kl_htc_credit_info *credit_info, u16 srvc_pri_order[], int list_len) { struct htc_endpoint *endpoint; int i, ep; - target->cred_dist_cntxt = cred_dist_cntxt; + target->credit_info = credit_info; list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list, &target->cred_dist_list); @@ -736,8 +1036,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) struct htc_endpoint *endpoint; struct list_head queue; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "htc_tx: ep id: %d, buf: 0x%p, len: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx ep id %d buf 0x%p len %d\n", packet->endpoint, packet->buf, packet->act_len); if (packet->endpoint >= ENDPOINT_MAX) { @@ -787,8 +1087,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target, list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) { packet->status = -ECANCELED; list_del(&packet->list); - ath6kl_dbg(ATH6KL_DBG_TRC, - "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n", packet, packet->act_len, packet->endpoint, packet->info.tx.tag); @@ -844,12 +1144,13 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target, endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", - target->cred_dist_cntxt, &target->cred_dist_list); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc tx activity ctxt 0x%p dist 0x%p\n", + target->credit_info, &target->cred_dist_list); - ath6k_credit_distribute(target->cred_dist_cntxt, - &target->cred_dist_list, - HTC_CREDIT_DIST_ACTIVITY_CHANGE); + ath6kl_credit_distribute(target->credit_info, + &target->cred_dist_list, + HTC_CREDIT_DIST_ACTIVITY_CHANGE); } spin_unlock_bh(&target->tx_lock); @@ -919,15 
+1220,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target, padded_len = CALC_TXRX_PADDED_LEN(target, rx_len); if (padded_len > packet->buf_len) { - ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n", + ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n", padded_len, rx_len, packet->buf_len); return -ENOMEM; } - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx 0x%p hdr x%x len %d mbox 0x%x\n", packet, packet->info.rx.exp_hdr, - padded_len, dev->ar->mbox_info.htc_addr, "sync"); + padded_len, dev->ar->mbox_info.htc_addr); status = hif_read_write_sync(dev->ar, dev->ar->mbox_info.htc_addr, @@ -1137,8 +1438,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target, } endpoint->ep_st.rx_bundle_from_hdr += 1; - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "htc hdr indicates :%d msg can be fetched as a bundle\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx bundle pkts %d\n", n_msg); } else /* HTC header only indicates 1 message to fetch */ @@ -1191,8 +1492,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets) ath6kl_err("htc_ctrl_rx, got message with len:%zu\n", packets->act_len + HTC_HDR_LENGTH); - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, - "Unexpected ENDPOINT 0 Message", "", + ath6kl_dbg_dump(ATH6KL_DBG_HTC, + "htc rx unexpected endpoint 0 message", "", packets->buf - HTC_HDR_LENGTH, packets->act_len + HTC_HDR_LENGTH); } @@ -1209,9 +1510,6 @@ static void htc_proc_cred_rpt(struct htc_target *target, int tot_credits = 0, i; bool dist = false; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "htc_proc_cred_rpt, credit report entries:%d\n", n_entries); - spin_lock_bh(&target->tx_lock); for (i = 0; i < n_entries; i++, rpt++) { @@ -1223,8 +1521,9 @@ static void htc_proc_cred_rpt(struct htc_target *target, endpoint = &target->endpoint[rpt->eid]; - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n", - rpt->eid, rpt->credits); + ath6kl_dbg(ATH6KL_DBG_CREDIT, + "credit report ep %d credits %d\n", + rpt->eid, rpt->credits); endpoint->ep_st.tx_cred_rpt += 1; endpoint->ep_st.cred_retnd += rpt->credits; @@ -1264,21 +1563,14 @@ static void htc_proc_cred_rpt(struct htc_target *target, tot_credits += rpt->credits; } - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, - "report indicated %d credits to distribute\n", - tot_credits); - if (dist) { /* * This was a credit return based on a completed send * operations note, this is done with the lock held */ - ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", - target->cred_dist_cntxt, &target->cred_dist_list); - - ath6k_credit_distribute(target->cred_dist_cntxt, - &target->cred_dist_list, - HTC_CREDIT_DIST_SEND_COMPLETE); + ath6kl_credit_distribute(target->credit_info, + &target->cred_dist_list, + HTC_CREDIT_DIST_SEND_COMPLETE); } spin_unlock_bh(&target->tx_lock); @@ -1320,14 +1612,15 @@ static int htc_parse_trailer(struct htc_target *target, if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && next_lk_ahds) { - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", lk_ahd->pre_valid, lk_ahd->post_valid); /* look ahead bytes are valid, copy them over */ memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4); - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead", + ath6kl_dbg_dump(ATH6KL_DBG_HTC, + "htc rx next look ahead", "", next_lk_ahds, 4); *n_lk_ahds = 1; @@ 
-1346,7 +1639,7 @@ static int htc_parse_trailer(struct htc_target *target, bundle_lkahd_rpt = (struct htc_bundle_lkahd_rpt *) record_buf; - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd", + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd", "", record_buf, record->len); for (i = 0; i < len; i++) { @@ -1378,10 +1671,8 @@ static int htc_proc_trailer(struct htc_target *target, u8 *record_buf; u8 *orig_buf; - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len); - - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "", - buf, len); + ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len); + ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len); orig_buf = buf; orig_len = len; @@ -1418,7 +1709,7 @@ static int htc_proc_trailer(struct htc_target *target, } if (status) - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer", + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer", "", orig_buf, orig_len); return status; @@ -1436,9 +1727,6 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, if (n_lkahds != NULL) *n_lkahds = 0; - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ", - packet->buf, packet->act_len); - /* * NOTE: we cannot assume the alignment of buf, so we use the safe * macros to retrieve 16 bit fields. @@ -1480,9 +1768,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, if (lk_ahd != packet->info.rx.exp_hdr) { ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n", __func__, packet, packet->info.rx.rx_flags); - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd", + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd", "", &packet->info.rx.exp_hdr, 4); - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header", + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header", "", (u8 *)&lk_ahd, sizeof(lk_ahd)); status = -ENOMEM; goto fail_rx; @@ -1518,15 +1806,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, fail_rx: if (status) - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT", - "", packet->buf, - packet->act_len < 256 ? 
packet->act_len : 256); - else { - if (packet->act_len > 0) - ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, - "HTC - Application Msg", "", - packet->buf, packet->act_len); - } + ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet", + "", packet->buf, packet->act_len); return status; } @@ -1534,8 +1815,8 @@ fail_rx: static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint, struct htc_packet *packet) { - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "htc calling ep %d recv callback on packet 0x%p\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx complete ep %d packet 0x%p\n", endpoint->eid, packet); endpoint->ep_cb.rx(endpoint->target, packet); } @@ -1571,9 +1852,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target, len = 0; - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "%s(): (numpackets: %d , actual : %d)\n", - __func__, get_queue_depth(rxq), n_scat_pkt); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx bundle depth %d pkts %d\n", + get_queue_depth(rxq), n_scat_pkt); scat_req = hif_scatter_req_get(target->dev->ar); @@ -1620,7 +1901,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target, scat_req->len = len; scat_req->scat_entries = i; - status = ath6kldev_submit_scat_req(target->dev, scat_req, true); + status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true); if (!status) *n_pkt_fetched = i; @@ -1643,7 +1924,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target, int status = 0; list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) { - list_del(&packet->list); ep = &target->endpoint[packet->endpoint]; /* process header for each of the recv packet */ @@ -1652,6 +1932,8 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target, if (status) return status; + list_del(&packet->list); + if (list_empty(comp_pktq)) { /* * Last packet's more packet flag is set @@ -1686,11 +1968,15 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target, int fetched_pkts; bool part_bundle = false; int status = 0; + struct list_head tmp_rxq; + struct htc_packet *packet, *tmp_pkt; /* now go fetch the list of HTC packets */ while (!list_empty(rx_pktq)) { fetched_pkts = 0; + INIT_LIST_HEAD(&tmp_rxq); + if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) { /* * There are enough packets to attempt a @@ -1698,28 +1984,27 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target, * allowed. */ status = ath6kl_htc_rx_bundle(target, rx_pktq, - comp_pktq, + &tmp_rxq, &fetched_pkts, part_bundle); if (status) - return status; + goto fail_rx; if (!list_empty(rx_pktq)) part_bundle = true; + + list_splice_tail_init(&tmp_rxq, comp_pktq); } if (!fetched_pkts) { - struct htc_packet *packet; packet = list_first_entry(rx_pktq, struct htc_packet, list); - list_del(&packet->list); - /* fully synchronous */ packet->completion = NULL; - if (!list_empty(rx_pktq)) + if (!list_is_singular(rx_pktq)) /* * look_aheads in all packet * except the last one in the @@ -1731,18 +2016,42 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target, /* go fetch the packet */ status = ath6kl_htc_rx_packet(target, packet, packet->act_len); + + list_move_tail(&packet->list, &tmp_rxq); + if (status) - return status; + goto fail_rx; - list_add_tail(&packet->list, comp_pktq); + list_splice_tail_init(&tmp_rxq, comp_pktq); } } + return 0; + +fail_rx: + + /* + * Cleanup any packets we allocated but didn't use to + * actually fetch any packets. 
+ */ + + list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) { + list_del(&packet->list); + htc_reclaim_rxbuf(target, packet, + &target->endpoint[packet->endpoint]); + } + + list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) { + list_del(&packet->list); + htc_reclaim_rxbuf(target, packet, + &target->endpoint[packet->endpoint]); + } + return status; } int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, - u32 msg_look_ahead[], int *num_pkts) + u32 msg_look_ahead, int *num_pkts) { struct htc_packet *packets, *tmp_pkt; struct htc_endpoint *endpoint; @@ -1759,7 +2068,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, * On first entry copy the look_aheads into our temp array for * processing */ - memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads)); + look_aheads[0] = msg_look_ahead; while (true) { @@ -1827,15 +2136,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, if (status) { ath6kl_err("failed to get pending recv messages: %d\n", status); - /* - * Cleanup any packets we allocated but didn't use to - * actually fetch any packets. - */ - list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) { - list_del(&packets->list); - htc_reclaim_rxbuf(target, packets, - &target->endpoint[packets->endpoint]); - } /* cleanup any packets in sync completion queue */ list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { @@ -1846,7 +2146,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, if (target->htc_flags & HTC_OP_STATE_STOPPING) { ath6kl_warn("host is going to stop blocking receiver for htc_stop\n"); - ath6kldev_rx_control(target->dev, false); + ath6kl_hif_rx_control(target->dev, false); } } @@ -1856,7 +2156,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, */ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n"); - ath6kldev_rx_control(target->dev, false); + ath6kl_hif_rx_control(target->dev, false); } *num_pkts = n_fetched; @@ -1874,12 +2174,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) struct htc_frame_hdr *htc_hdr; u32 look_ahead; - if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead, + if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead, HTC_TARGET_RESPONSE_TIMEOUT)) return NULL; - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead); + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx wait ctrl look_ahead 0x%X\n", look_ahead); htc_hdr = (struct htc_frame_hdr *)&look_ahead; @@ -1943,8 +2243,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, depth = get_queue_depth(pkt_queue); - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx add multiple ep id %d cnt %d len %d\n", first_pkt->endpoint, depth, first_pkt->buf_len); endpoint = &target->endpoint[first_pkt->endpoint]; @@ -1969,8 +2269,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, /* check if we are blocked waiting for a new buffer */ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { if (target->ep_waiting == first_pkt->endpoint) { - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "receiver was blocked on ep:%d, unblocking.\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx blocked on ep %d, unblocking\n", target->ep_waiting); target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS; target->ep_waiting = ENDPOINT_MAX; @@ -1982,7 +2282,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, if (rx_unblock && 
!(target->htc_flags & HTC_OP_STATE_STOPPING)) /* TODO : implement a buffer threshold count? */ - ath6kldev_rx_control(target->dev, true); + ath6kl_hif_rx_control(target->dev, true); return status; } @@ -2004,8 +2304,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target) &endpoint->rx_bufq, list) { list_del(&packet->list); spin_unlock_bh(&target->rx_lock); - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "flushing rx pkt:0x%p, len:%d, ep:%d\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc rx flush pkt 0x%p len %d ep %d\n", packet, packet->buf_len, packet->endpoint); dev_kfree_skb(packet->pkt_cntxt); @@ -2028,8 +2328,8 @@ int ath6kl_htc_conn_service(struct htc_target *target, unsigned int max_msg_sz = 0; int status = 0; - ath6kl_dbg(ATH6KL_DBG_TRC, - "htc_conn_service, target:0x%p service id:0x%X\n", + ath6kl_dbg(ATH6KL_DBG_HTC, + "htc connect service target 0x%p service id 0x%x\n", target, conn_req->svc_id); if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { @@ -2115,7 +2415,7 @@ int ath6kl_htc_conn_service(struct htc_target *target, endpoint->len_max = max_msg_sz; endpoint->ep_cb = conn_req->ep_cb; endpoint->cred_dist.svc_id = conn_req->svc_id; - endpoint->cred_dist.htc_rsvd = endpoint; + endpoint->cred_dist.htc_ep = endpoint; endpoint->cred_dist.endpoint = assigned_ep; endpoint->cred_dist.cred_sz = target->tgt_cred_sz; @@ -2172,6 +2472,7 @@ static void reset_ep_state(struct htc_target *target) } /* reset distribution list */ + /* FIXME: free existing entries */ INIT_LIST_HEAD(&target->cred_dist_list); } @@ -2201,8 +2502,8 @@ static void htc_setup_msg_bndl(struct htc_target *target) target->msg_per_bndl_max = min(target->max_scat_entries, target->msg_per_bndl_max); - ath6kl_dbg(ATH6KL_DBG_TRC, - "htc bundling allowed. max msg per htc bundle: %d\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, + "htc bundling allowed msg_per_bndl_max %d\n", target->msg_per_bndl_max); /* Max rx bundle size is limited by the max tx bundle size */ @@ -2211,7 +2512,7 @@ static void htc_setup_msg_bndl(struct htc_target *target) target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH, target->max_xfer_szper_scatreq); - ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n", target->max_rx_bndl_sz, target->max_tx_bndl_sz); if (target->max_tx_bndl_sz) @@ -2265,8 +2566,8 @@ int ath6kl_htc_wait_target(struct htc_target *target) target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt); target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz); - ath6kl_dbg(ATH6KL_DBG_HTC_RECV, - "target ready: credits: %d credit size: %d\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, + "htc target ready credits %d size %d\n", target->tgt_creds, target->tgt_cred_sz); /* check if this is an extended ready message */ @@ -2280,7 +2581,7 @@ int ath6kl_htc_wait_target(struct htc_target *target) target->msg_per_bndl_max = 0; } - ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n", (target->htc_tgt_ver == HTC_VERSION_2P0) ? 
"2.0" : ">= 2.1", target->htc_tgt_ver); @@ -2300,6 +2601,10 @@ int ath6kl_htc_wait_target(struct htc_target *target) status = ath6kl_htc_conn_service((void *)target, &connect, &resp); if (status) + /* + * FIXME: this call doesn't make sense, the caller should + * call ath6kl_htc_cleanup() when it wants remove htc + */ ath6kl_hif_cleanup_scatter(target->dev->ar); fail_wait_target: @@ -2320,8 +2625,11 @@ int ath6kl_htc_start(struct htc_target *target) struct htc_packet *packet; int status; + memset(&target->dev->irq_proc_reg, 0, + sizeof(target->dev->irq_proc_reg)); + /* Disable interrupts at the chip level */ - ath6kldev_disable_intrs(target->dev); + ath6kl_hif_disable_intrs(target->dev); target->htc_flags = 0; target->rx_st_flags = 0; @@ -2334,8 +2642,8 @@ int ath6kl_htc_start(struct htc_target *target) } /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ - ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list, - target->tgt_creds); + ath6kl_credit_init(target->credit_info, &target->cred_dist_list, + target->tgt_creds); dump_cred_dist_stats(target); @@ -2346,7 +2654,7 @@ int ath6kl_htc_start(struct htc_target *target) return status; /* unmask interrupts */ - status = ath6kldev_unmask_intrs(target->dev); + status = ath6kl_hif_unmask_intrs(target->dev); if (status) ath6kl_htc_stop(target); @@ -2354,6 +2662,44 @@ int ath6kl_htc_start(struct htc_target *target) return status; } +static int ath6kl_htc_reset(struct htc_target *target) +{ + u32 block_size, ctrl_bufsz; + struct htc_packet *packet; + int i; + + reset_ep_state(target); + + block_size = target->dev->ar->mbox_info.block_size; + + ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? + (block_size + HTC_HDR_LENGTH) : + (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); + + for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { + packet = kzalloc(sizeof(*packet), GFP_KERNEL); + if (!packet) + return -ENOMEM; + + packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); + if (!packet->buf_start) { + kfree(packet); + return -ENOMEM; + } + + packet->buf_len = ctrl_bufsz; + if (i < NUM_CONTROL_RX_BUFFERS) { + packet->act_len = 0; + packet->buf = packet->buf_start; + packet->endpoint = ENDPOINT_0; + list_add_tail(&packet->list, &target->free_ctrl_rxbuf); + } else + list_add_tail(&packet->list, &target->free_ctrl_txbuf); + } + + return 0; +} + /* htc_stop: stop interrupt reception, and flush all queued buffers */ void ath6kl_htc_stop(struct htc_target *target) { @@ -2366,21 +2712,19 @@ void ath6kl_htc_stop(struct htc_target *target) * function returns all pending HIF I/O has completed, we can * safely flush the queues. 
*/ - ath6kldev_mask_intrs(target->dev); + ath6kl_hif_mask_intrs(target->dev); ath6kl_htc_flush_txep_all(target); ath6kl_htc_flush_rx_buf(target); - reset_ep_state(target); + ath6kl_htc_reset(target); } void *ath6kl_htc_create(struct ath6kl *ar) { struct htc_target *target = NULL; - struct htc_packet *packet; - int status = 0, i = 0; - u32 block_size, ctrl_bufsz; + int status = 0; target = kzalloc(sizeof(*target), GFP_KERNEL); if (!target) { @@ -2392,7 +2736,7 @@ void *ath6kl_htc_create(struct ath6kl *ar) if (!target->dev) { ath6kl_err("unable to allocate memory\n"); status = -ENOMEM; - goto fail_create_htc; + goto err_htc_cleanup; } spin_lock_init(&target->htc_lock); @@ -2407,49 +2751,20 @@ void *ath6kl_htc_create(struct ath6kl *ar) target->dev->htc_cnxt = target; target->ep_waiting = ENDPOINT_MAX; - reset_ep_state(target); - - status = ath6kldev_setup(target->dev); - + status = ath6kl_hif_setup(target->dev); if (status) - goto fail_create_htc; - - block_size = ar->mbox_info.block_size; - - ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? - (block_size + HTC_HDR_LENGTH) : - (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); - - for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { - packet = kzalloc(sizeof(*packet), GFP_KERNEL); - if (!packet) - break; + goto err_htc_cleanup; - packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); - if (!packet->buf_start) { - kfree(packet); - break; - } + status = ath6kl_htc_reset(target); + if (status) + goto err_htc_cleanup; - packet->buf_len = ctrl_bufsz; - if (i < NUM_CONTROL_RX_BUFFERS) { - packet->act_len = 0; - packet->buf = packet->buf_start; - packet->endpoint = ENDPOINT_0; - list_add_tail(&packet->list, &target->free_ctrl_rxbuf); - } else - list_add_tail(&packet->list, &target->free_ctrl_txbuf); - } + return target; -fail_create_htc: - if (i != NUM_CONTROL_BUFFERS || status) { - if (target) { - ath6kl_htc_cleanup(target); - target = NULL; - } - } +err_htc_cleanup: + ath6kl_htc_cleanup(target); - return target; + return NULL; } /* cleanup the HTC instance */ diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h index 8ce0c2c07de..57672e1ed1a 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.h +++ b/drivers/net/wireless/ath/ath6kl/htc.h @@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist { int cred_per_msg; /* reserved for HTC use */ - void *htc_rsvd; + struct htc_endpoint *htc_ep; /* * current depth of TX queue , i.e. 
messages waiting for credits @@ -414,9 +414,11 @@ enum htc_credit_dist_reason { HTC_CREDIT_DIST_SEEK_CREDITS, }; -struct htc_credit_state_info { +struct ath6kl_htc_credit_info { int total_avail_credits; int cur_free_credits; + + /* list of lowest priority endpoints */ struct list_head lowestpri_ep_dist; }; @@ -508,10 +510,13 @@ struct ath6kl_device; /* our HTC target state */ struct htc_target { struct htc_endpoint endpoint[ENDPOINT_MAX]; + + /* contains struct htc_endpoint_credit_dist */ struct list_head cred_dist_list; + struct list_head free_ctrl_txbuf; struct list_head free_ctrl_rxbuf; - struct htc_credit_state_info *cred_dist_cntxt; + struct ath6kl_htc_credit_info *credit_info; int tgt_creds; unsigned int tgt_cred_sz; spinlock_t htc_lock; @@ -542,7 +547,7 @@ struct htc_target { void *ath6kl_htc_create(struct ath6kl *ar); void ath6kl_htc_set_credit_dist(struct htc_target *target, - struct htc_credit_state_info *cred_info, + struct ath6kl_htc_credit_info *cred_info, u16 svc_pri_order[], int len); int ath6kl_htc_wait_target(struct htc_target *target); int ath6kl_htc_start(struct htc_target *target); @@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target, int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, struct list_head *pktq); int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, - u32 msg_look_ahead[], int *n_pkts); + u32 msg_look_ahead, int *n_pkts); + +int ath6kl_credit_setup(void *htc_handle, + struct ath6kl_htc_credit_info *cred_info); static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, u8 *buf, unsigned int len, diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h deleted file mode 100644 index 171ad63d89b..00000000000 --- a/drivers/net/wireless/ath/ath6kl/htc_hif.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2007-2011 Atheros Communications Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef HTC_HIF_H -#define HTC_HIF_H - -#include "htc.h" -#include "hif.h" - -#define ATH6KL_MAILBOXES 4 - -/* HTC runs over mailbox 0 */ -#define HTC_MAILBOX 0 - -#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01 - -#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \ - INT_STATUS_ENABLE_CPU_MASK | \ - INT_STATUS_ENABLE_COUNTER_MASK) - -#define ATH6KL_REG_IO_BUFFER_SIZE 32 -#define ATH6KL_MAX_REG_IO_BUFFERS 8 -#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16 -#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024) -#define ATH6KL_SCATTER_REQS 4 - -#ifndef A_CACHE_LINE_PAD -#define A_CACHE_LINE_PAD 128 -#endif -#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2 -#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024) - -struct ath6kl_irq_proc_registers { - u8 host_int_status; - u8 cpu_int_status; - u8 error_int_status; - u8 counter_int_status; - u8 mbox_frame; - u8 rx_lkahd_valid; - u8 host_int_status2; - u8 gmbox_rx_avail; - __le32 rx_lkahd[2]; - __le32 rx_gmbox_lkahd_alias[2]; -} __packed; - -struct ath6kl_irq_enable_reg { - u8 int_status_en; - u8 cpu_int_status_en; - u8 err_int_status_en; - u8 cntr_int_status_en; -} __packed; - -struct ath6kl_device { - spinlock_t lock; - u8 pad1[A_CACHE_LINE_PAD]; - struct ath6kl_irq_proc_registers irq_proc_reg; - u8 pad2[A_CACHE_LINE_PAD]; - struct ath6kl_irq_enable_reg irq_en_reg; - u8 pad3[A_CACHE_LINE_PAD]; - struct htc_target *htc_cnxt; - struct ath6kl *ar; -}; - -int ath6kldev_setup(struct ath6kl_device *dev); -int ath6kldev_unmask_intrs(struct ath6kl_device *dev); -int ath6kldev_mask_intrs(struct ath6kl_device *dev); -int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, - u32 *lk_ahd, int timeout); -int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx); -int ath6kldev_disable_intrs(struct ath6kl_device *dev); - -int ath6kldev_rw_comp_handler(void *context, int status); -int ath6kldev_intr_bh_handler(struct ath6kl *ar); - -/* Scatter Function and Definitions */ -int ath6kldev_submit_scat_req(struct ath6kl_device *dev, - struct hif_scatter_req *scat_req, bool read); - -#endif /*ATH6KL_H_ */ diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index c1d2366704b..7f55be3092d 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -16,6 +16,7 @@ */ #include <linux/moduleparam.h> +#include <linux/errno.h> #include <linux/of.h> #include <linux/mmc/sdio_func.h> #include "core.h" @@ -26,9 +27,85 @@ unsigned int debug_mask; static unsigned int testmode; +static bool suspend_cutpower; module_param(debug_mask, uint, 0644); module_param(testmode, uint, 0644); +module_param(suspend_cutpower, bool, 0444); + +static const struct ath6kl_hw hw_list[] = { + { + .id = AR6003_HW_2_0_VERSION, + .name = "ar6003 hw 2.0", + .dataset_patch_addr = 0x57e884, + .app_load_addr = 0x543180, + .board_ext_data_addr = 0x57e500, + .reserved_ram_size = 6912, + .refclk_hz = 26000000, + .uarttx_pin = 8, + + /* hw2.0 needs override address hardcoded */ + .app_start_override_addr = 0x944C00, + + .fw_otp = AR6003_HW_2_0_OTP_FILE, + .fw = AR6003_HW_2_0_FIRMWARE_FILE, + .fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE, + .fw_patch = AR6003_HW_2_0_PATCH_FILE, + .fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE, + .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE, + .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6003_HW_2_1_1_VERSION, + .name = "ar6003 hw 2.1.1", + .dataset_patch_addr = 0x57ff74, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x542330, + .reserved_ram_size = 512, 
+ .refclk_hz = 26000000, + .uarttx_pin = 8, + + .fw_otp = AR6003_HW_2_1_1_OTP_FILE, + .fw = AR6003_HW_2_1_1_FIRMWARE_FILE, + .fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE, + .fw_patch = AR6003_HW_2_1_1_PATCH_FILE, + .fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE, + .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE, + .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6004_HW_1_0_VERSION, + .name = "ar6004 hw 1.0", + .dataset_patch_addr = 0x57e884, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x437000, + .reserved_ram_size = 19456, + .board_addr = 0x433900, + .refclk_hz = 26000000, + .uarttx_pin = 11, + + .fw = AR6004_HW_1_0_FIRMWARE_FILE, + .fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE, + .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE, + }, + { + .id = AR6004_HW_1_1_VERSION, + .name = "ar6004 hw 1.1", + .dataset_patch_addr = 0x57e884, + .app_load_addr = 0x1234, + .board_ext_data_addr = 0x437000, + .reserved_ram_size = 11264, + .board_addr = 0x43d400, + .refclk_hz = 40000000, + .uarttx_pin = 11, + + .fw = AR6004_HW_1_1_FIRMWARE_FILE, + .fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE, + .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE, + .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE, + }, +}; /* * Include definitions here that can be used to tune the WLAN module @@ -55,7 +132,6 @@ module_param(testmode, uint, 0644); */ #define WLAN_CONFIG_DISCONNECT_TIMEOUT 10 -#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8 #define ATH6KL_DATA_OFFSET 64 struct sk_buff *ath6kl_buf_alloc(int size) @@ -73,37 +149,21 @@ struct sk_buff *ath6kl_buf_alloc(int size) return skb; } -void ath6kl_init_profile_info(struct ath6kl *ar) +void ath6kl_init_profile_info(struct ath6kl_vif *vif) { - ar->ssid_len = 0; - memset(ar->ssid, 0, sizeof(ar->ssid)); - - ar->dot11_auth_mode = OPEN_AUTH; - ar->auth_mode = NONE_AUTH; - ar->prwise_crypto = NONE_CRYPT; - ar->prwise_crypto_len = 0; - ar->grp_crypto = NONE_CRYPT; - ar->grp_crypto_len = 0; - memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); - memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); - memset(ar->bssid, 0, sizeof(ar->bssid)); - ar->bss_ch = 0; - ar->nw_type = ar->next_mode = INFRA_NETWORK; -} - -static u8 ath6kl_get_fw_iftype(struct ath6kl *ar) -{ - switch (ar->nw_type) { - case INFRA_NETWORK: - return HI_OPTION_FW_MODE_BSS_STA; - case ADHOC_NETWORK: - return HI_OPTION_FW_MODE_IBSS; - case AP_NETWORK: - return HI_OPTION_FW_MODE_AP; - default: - ath6kl_err("Unsupported interface type :%d\n", ar->nw_type); - return 0xff; - } + vif->ssid_len = 0; + memset(vif->ssid, 0, sizeof(vif->ssid)); + + vif->dot11_auth_mode = OPEN_AUTH; + vif->auth_mode = NONE_AUTH; + vif->prwise_crypto = NONE_CRYPT; + vif->prwise_crypto_len = 0; + vif->grp_crypto = NONE_CRYPT; + vif->grp_crypto_len = 0; + memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); + memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + memset(vif->bssid, 0, sizeof(vif->bssid)); + vif->bss_ch = 0; } static int ath6kl_set_host_app_area(struct ath6kl *ar) @@ -120,7 +180,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar) return -EIO; address = TARG_VTOP(ar->target_type, data); - host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION; + host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION); if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area, sizeof(struct host_app_area))) return -EIO; @@ -258,40 +318,12 @@ static int ath6kl_init_service_ep(struct ath6kl *ar) return 0; } -static void ath6kl_init_control_info(struct ath6kl *ar) 
+void ath6kl_init_control_info(struct ath6kl_vif *vif) { - u8 ctr; - - clear_bit(WMI_ENABLED, &ar->flag); - ath6kl_init_profile_info(ar); - ar->def_txkey_index = 0; - memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); - ar->ch_hint = 0; - ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL; - ar->listen_intvl_b = 0; - ar->tx_pwr = 0; - clear_bit(SKIP_SCAN, &ar->flag); - set_bit(WMM_ENABLED, &ar->flag); - ar->intra_bss = 1; - memset(&ar->sc_params, 0, sizeof(ar->sc_params)); - ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT; - ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS; - ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; - - memset((u8 *)ar->sta_list, 0, - AP_MAX_NUM_STA * sizeof(struct ath6kl_sta)); - - spin_lock_init(&ar->mcastpsq_lock); - - /* Init the PS queues */ - for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { - spin_lock_init(&ar->sta_list[ctr].psq_lock); - skb_queue_head_init(&ar->sta_list[ctr].psq); - } - - skb_queue_head_init(&ar->mcastpsq); - - memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); + ath6kl_init_profile_info(vif); + vif->def_txkey_index = 0; + memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); + vif->ch_hint = 0; } /* @@ -341,62 +373,7 @@ out: return status; } -#define REG_DUMP_COUNT_AR6003 60 -#define REGISTER_DUMP_LEN_MAX 60 - -static void ath6kl_dump_target_assert_info(struct ath6kl *ar) -{ - u32 address; - u32 regdump_loc = 0; - int status; - u32 regdump_val[REGISTER_DUMP_LEN_MAX]; - u32 i; - - if (ar->target_type != TARGET_TYPE_AR6003) - return; - - /* the reg dump pointer is copied to the host interest area */ - address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); - address = TARG_VTOP(ar->target_type, address); - - /* read RAM location through diagnostic window */ - status = ath6kl_diag_read32(ar, address, ®dump_loc); - - if (status || !regdump_loc) { - ath6kl_err("failed to get ptr to register dump area\n"); - return; - } - - ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n", - regdump_loc); - regdump_loc = TARG_VTOP(ar->target_type, regdump_loc); - - /* fetch register dump data */ - status = ath6kl_diag_read(ar, regdump_loc, (u8 *)®dump_val[0], - REG_DUMP_COUNT_AR6003 * (sizeof(u32))); - - if (status) { - ath6kl_err("failed to get register dump\n"); - return; - } - ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n"); - - for (i = 0; i < REG_DUMP_COUNT_AR6003; i++) - ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n", - i, regdump_val[i]); - -} - -void ath6kl_target_failure(struct ath6kl *ar) -{ - ath6kl_err("target asserted\n"); - - /* try dumping target assertion information (if any) */ - ath6kl_dump_target_assert_info(ar); - -} - -static int ath6kl_target_config_wlan_params(struct ath6kl *ar) +static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx) { int status = 0; int ret; @@ -406,46 +383,46 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar) * default values. Required if checksum offload is needed. Set * RxMetaVersion to 2. 
*/ - if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, + if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx, ar->rx_meta_ver, 0, 0)) { ath6kl_err("unable to set the rx frame format\n"); status = -EIO; } if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) - if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1, + if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1, IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) { ath6kl_err("unable to set power save fail event policy\n"); status = -EIO; } if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) - if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0, + if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0, WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) { ath6kl_err("unable to set barker preamble policy\n"); status = -EIO; } - if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, + if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx, WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) { ath6kl_err("unable to set keep alive interval\n"); status = -EIO; } - if (ath6kl_wmi_disctimeout_cmd(ar->wmi, + if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx, WLAN_CONFIG_DISCONNECT_TIMEOUT)) { ath6kl_err("unable to set disconnect timeout\n"); status = -EIO; } if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) - if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) { + if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) { ath6kl_err("unable to set txop bursting\n"); status = -EIO; } - if (ar->p2p) { - ret = ath6kl_wmi_info_req_cmd(ar->wmi, + if (ar->p2p && (ar->vif_max == 1 || idx)) { + ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx, P2P_FLAG_CAPABILITIES_REQ | P2P_FLAG_MACADDR_REQ | P2P_FLAG_HMODEL_REQ); @@ -453,13 +430,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar) ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P " "capabilities (%d) - assuming P2P not " "supported\n", ret); - ar->p2p = 0; + ar->p2p = false; } } - if (ar->p2p) { + if (ar->p2p && (ar->vif_max == 1 || idx)) { /* Enable Probe Request reporting for P2P */ - ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true); + ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true); if (ret) { ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe " "Request reporting (%d)\n", ret); @@ -472,13 +449,40 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar) int ath6kl_configure_target(struct ath6kl *ar) { u32 param, ram_reserved_size; - u8 fw_iftype; + u8 fw_iftype, fw_mode = 0, fw_submode = 0; + int i, status; - fw_iftype = ath6kl_get_fw_iftype(ar); - if (fw_iftype == 0xff) - return -EINVAL; + /* + * Note: Even though the firmware interface type is + * chosen as BSS_STA for all three interfaces, can + * be configured to IBSS/AP as long as the fw submode + * remains normal mode (0 - AP, STA and IBSS). But + * due to an target assert in firmware only one interface is + * configured for now. 
+ */ + fw_iftype = HI_OPTION_FW_MODE_BSS_STA; + + for (i = 0; i < ar->vif_max; i++) + fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS); + + /* + * By default, submodes : + * vif[0] - AP/STA/IBSS + * vif[1] - "P2P dev"/"P2P GO"/"P2P Client" + * vif[2] - "P2P dev"/"P2P GO"/"P2P Client" + */ + + for (i = 0; i < ar->max_norm_iface; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_NONE << + (i * HI_OPTION_FW_SUBMODE_BITS); + + for (i = ar->max_norm_iface; i < ar->vif_max; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << + (i * HI_OPTION_FW_SUBMODE_BITS); + + if (ar->p2p && ar->vif_max == 1) + fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; - /* Tell target which HTC version it is used*/ param = HTC_PROTOCOL_VERSION; if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar, @@ -499,12 +503,10 @@ int ath6kl_configure_target(struct ath6kl *ar) return -EIO; } - param |= (1 << HI_OPTION_NUM_DEV_SHIFT); - param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT); - if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) { - param |= HI_OPTION_FW_SUBMODE_P2PDEV << - HI_OPTION_FW_SUBMODE_SHIFT; - } + param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT); + param |= fw_mode << HI_OPTION_FW_MODE_SHIFT; + param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT; + param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); @@ -550,71 +552,55 @@ int ath6kl_configure_target(struct ath6kl *ar) /* use default number of control buffers */ return -EIO; + /* Configure GPIO AR600x UART */ + param = ar->hw.uarttx_pin; + status = ath6kl_bmi_write(ar, + ath6kl_get_hi_item_addr(ar, + HI_ITEM(hi_dbg_uart_txpin)), + (u8 *)¶m, 4); + if (status) + return status; + + /* Configure target refclk_hz */ + param = ar->hw.refclk_hz; + status = ath6kl_bmi_write(ar, + ath6kl_get_hi_item_addr(ar, + HI_ITEM(hi_refclk_hz)), + (u8 *)¶m, 4); + if (status) + return status; + return 0; } -struct ath6kl *ath6kl_core_alloc(struct device *sdev) +void ath6kl_core_free(struct ath6kl *ar) { - struct net_device *dev; - struct ath6kl *ar; - struct wireless_dev *wdev; - - wdev = ath6kl_cfg80211_init(sdev); - if (!wdev) { - ath6kl_err("ath6kl_cfg80211_init failed\n"); - return NULL; - } - - ar = wdev_priv(wdev); - ar->dev = sdev; - ar->wdev = wdev; - wdev->iftype = NL80211_IFTYPE_STATION; - - if (ath6kl_debug_init(ar)) { - ath6kl_err("Failed to initialize debugfs\n"); - ath6kl_cfg80211_deinit(ar); - return NULL; - } - - dev = alloc_netdev(0, "wlan%d", ether_setup); - if (!dev) { - ath6kl_err("no memory for network device instance\n"); - ath6kl_cfg80211_deinit(ar); - return NULL; - } - - dev->ieee80211_ptr = wdev; - SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy)); - wdev->netdev = dev; - ar->sme_state = SME_DISCONNECTED; - - init_netdev(dev); + wiphy_free(ar->wiphy); +} - ar->net_dev = dev; - set_bit(WLAN_ENABLED, &ar->flag); +void ath6kl_core_cleanup(struct ath6kl *ar) +{ + ath6kl_hif_power_off(ar); - ar->wlan_pwr_state = WLAN_POWER_STATE_ON; + destroy_workqueue(ar->ath6kl_wq); - spin_lock_init(&ar->lock); + if (ar->htc_target) + ath6kl_htc_cleanup(ar->htc_target); - ath6kl_init_control_info(ar); - init_waitqueue_head(&ar->event_wq); - sema_init(&ar->sem, 1); - clear_bit(DESTROY_IN_PROGRESS, &ar->flag); + ath6kl_cookie_cleanup(ar); - INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); + ath6kl_cleanup_amsdu_rxbufs(ar); - setup_timer(&ar->disconnect_timer, disconnect_timer_handler, - (unsigned long) dev); + ath6kl_bmi_cleanup(ar); - return ar; -} + ath6kl_debug_cleanup(ar); -int ath6kl_unavail_ev(struct ath6kl *ar) -{ - ath6kl_destroy(ar->net_dev, 1); + 
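The loops above pack one firmware mode field and one submode field per virtual interface into the single host-interest option word. A minimal standalone sketch of that packing, using made-up field widths and shifts in place of the real HI_OPTION_* constants:

#include <stdio.h>

/* Illustrative values only; the real layout comes from the hi_option bit fields. */
#define FW_MODE_BITS      2
#define FW_SUBMODE_BITS   2
#define FW_MODE_BSS_STA   0x1
#define FW_SUBMODE_NONE   0x0
#define FW_SUBMODE_P2PDEV 0x2

static unsigned int pack_modes(int vif_max, int max_norm_iface)
{
	unsigned int fw_mode = 0, fw_submode = 0;
	int i;

	/* every vif gets the same base mode (station) */
	for (i = 0; i < vif_max; i++)
		fw_mode |= FW_MODE_BSS_STA << (i * FW_MODE_BITS);

	/* "normal" interfaces get no submode, the remaining slots are P2P device */
	for (i = 0; i < max_norm_iface; i++)
		fw_submode |= FW_SUBMODE_NONE << (i * FW_SUBMODE_BITS);
	for (i = max_norm_iface; i < vif_max; i++)
		fw_submode |= FW_SUBMODE_P2PDEV << (i * FW_SUBMODE_BITS);

	/* assumed shifts for the combined option word */
	return fw_mode | (fw_submode << 8);
}

int main(void)
{
	printf("option word: 0x%x\n", pack_modes(3, 2));
	return 0;
}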
kfree(ar->fw_board); + kfree(ar->fw_otp); + kfree(ar->fw); + kfree(ar->fw_patch); - return 0; + ath6kl_deinit_ieee80211_hw(ar); } /* firmware upload */ @@ -643,11 +629,11 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename, static const char *get_target_ver_dir(const struct ath6kl *ar) { switch (ar->version.target_ver) { - case AR6003_REV1_VERSION: + case AR6003_HW_1_0_VERSION: return "ath6k/AR6003/hw1.0"; - case AR6003_REV2_VERSION: + case AR6003_HW_2_0_VERSION: return "ath6k/AR6003/hw2.0"; - case AR6003_REV3_VERSION: + case AR6003_HW_2_1_1_VERSION: return "ath6k/AR6003/hw2.1.1"; } ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__, @@ -705,17 +691,10 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar) if (ar->fw_board != NULL) return 0; - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_BOARD_DATA_FILE; - break; - case AR6004_REV1_VERSION: - filename = AR6004_REV1_BOARD_DATA_FILE; - break; - default: - filename = AR6003_REV3_BOARD_DATA_FILE; - break; - } + if (WARN_ON(ar->hw.fw_board == NULL)) + return -EINVAL; + + filename = ar->hw.fw_board; ret = ath6kl_get_fw(ar, filename, &ar->fw_board, &ar->fw_board_len); @@ -733,17 +712,7 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar) ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n", filename, ret); - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE; - break; - case AR6004_REV1_VERSION: - filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE; - break; - default: - filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE; - break; - } + filename = ar->hw.fw_default_board; ret = ath6kl_get_fw(ar, filename, &ar->fw_board, &ar->fw_board_len); @@ -767,19 +736,14 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar) if (ar->fw_otp != NULL) return 0; - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_OTP_FILE; - break; - case AR6004_REV1_VERSION: - ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n"); + if (ar->hw.fw_otp == NULL) { + ath6kl_dbg(ATH6KL_DBG_BOOT, + "no OTP file configured for this hw\n"); return 0; - break; - default: - filename = AR6003_REV3_OTP_FILE; - break; } + filename = ar->hw.fw_otp; + ret = ath6kl_get_fw(ar, filename, &ar->fw_otp, &ar->fw_otp_len); if (ret) { @@ -800,38 +764,22 @@ static int ath6kl_fetch_fw_file(struct ath6kl *ar) return 0; if (testmode) { - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_TCMD_FIRMWARE_FILE; - break; - case AR6003_REV3_VERSION: - filename = AR6003_REV3_TCMD_FIRMWARE_FILE; - break; - case AR6004_REV1_VERSION: - ath6kl_warn("testmode not supported with ar6004\n"); + if (ar->hw.fw_tcmd == NULL) { + ath6kl_warn("testmode not supported\n"); return -EOPNOTSUPP; - default: - ath6kl_warn("unknown target version: 0x%x\n", - ar->version.target_ver); - return -EINVAL; } + filename = ar->hw.fw_tcmd; + set_bit(TESTMODE, &ar->flag); goto get_fw; } - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_FIRMWARE_FILE; - break; - case AR6004_REV1_VERSION: - filename = AR6004_REV1_FIRMWARE_FILE; - break; - default: - filename = AR6003_REV3_FIRMWARE_FILE; - break; - } + if (WARN_ON(ar->hw.fw == NULL)) + return -EINVAL; + + filename = ar->hw.fw; get_fw: ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len); @@ -849,27 +797,20 @@ static int ath6kl_fetch_patch_file(struct ath6kl *ar) const char *filename; int ret; - switch 
(ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_PATCH_FILE; - break; - case AR6004_REV1_VERSION: - /* FIXME: implement for AR6004 */ + if (ar->fw_patch != NULL) return 0; - break; - default: - filename = AR6003_REV3_PATCH_FILE; - break; - } - if (ar->fw_patch == NULL) { - ret = ath6kl_get_fw(ar, filename, &ar->fw_patch, - &ar->fw_patch_len); - if (ret) { - ath6kl_err("Failed to get patch file %s: %d\n", - filename, ret); - return ret; - } + if (ar->hw.fw_patch == NULL) + return 0; + + filename = ar->hw.fw_patch; + + ret = ath6kl_get_fw(ar, filename, &ar->fw_patch, + &ar->fw_patch_len); + if (ret) { + ath6kl_err("Failed to get patch file %s: %d\n", + filename, ret); + return ret; } return 0; @@ -904,19 +845,10 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar) int ret, ie_id, i, index, bit; __le32 *val; - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - filename = AR6003_REV2_FIRMWARE_2_FILE; - break; - case AR6003_REV3_VERSION: - filename = AR6003_REV3_FIRMWARE_2_FILE; - break; - case AR6004_REV1_VERSION: - filename = AR6004_REV1_FIRMWARE_2_FILE; - break; - default: + if (ar->hw.fw_api2 == NULL) return -EOPNOTSUPP; - } + + filename = ar->hw.fw_api2; ret = request_firmware(&fw, filename, ar->dev); if (ret) @@ -1006,12 +938,15 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar) ar->hw.reserved_ram_size); break; case ATH6KL_FW_IE_CAPABILITIES: + if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8)) + break; + ath6kl_dbg(ATH6KL_DBG_BOOT, "found firmware capabilities ie (%zd B)\n", ie_len); for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) { - index = ALIGN(i, 8) / 8; + index = i / 8; bit = i % 8; if (data[index] & (1 << bit)) @@ -1030,9 +965,34 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar) ar->hw.dataset_patch_addr = le32_to_cpup(val); ath6kl_dbg(ATH6KL_DBG_BOOT, - "found patch address ie 0x%d\n", + "found patch address ie 0x%x\n", ar->hw.dataset_patch_addr); break; + case ATH6KL_FW_IE_BOARD_ADDR: + if (ie_len != sizeof(*val)) + break; + + val = (__le32 *) data; + ar->hw.board_addr = le32_to_cpup(val); + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found board address ie 0x%x\n", + ar->hw.board_addr); + break; + case ATH6KL_FW_IE_VIF_MAX: + if (ie_len != sizeof(*val)) + break; + + val = (__le32 *) data; + ar->vif_max = min_t(unsigned int, le32_to_cpup(val), + ATH6KL_VIF_MAX); + + if (ar->vif_max > 1 && !ar->p2p) + ar->max_norm_iface = 2; + + ath6kl_dbg(ATH6KL_DBG_BOOT, + "found vif max ie %d\n", ar->vif_max); + break; default: ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n", le32_to_cpup(&hdr->id)); @@ -1087,8 +1047,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) * For AR6004, host determine Target RAM address for * writing board data. 
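The ATH6KL_FW_IE_CAPABILITIES hunk above replaces ALIGN(i, 8) / 8 with i / 8: ALIGN rounds up, so capability bits 1-7 were being looked up in byte 1 instead of byte 0. A small runnable sketch of the corrected byte/bit addressing (the bitmap contents are arbitrary test data):

#include <stdio.h>
#include <stdint.h>

/* return non-zero if capability bit 'i' is set in the firmware bitmap */
static int fw_cap_supported(const uint8_t *data, size_t len, unsigned int i)
{
	unsigned int index = i / 8;   /* byte that holds bit i */
	unsigned int bit = i % 8;     /* position inside that byte */

	if (index >= len)
		return 0;
	return (data[index] >> bit) & 1;
}

int main(void)
{
	uint8_t caps[2] = { 0x05, 0x80 };   /* bits 0, 2 and 15 set */

	printf("cap0=%d cap2=%d cap15=%d\n",
	       fw_cap_supported(caps, sizeof(caps), 0),
	       fw_cap_supported(caps, sizeof(caps), 2),
	       fw_cap_supported(caps, sizeof(caps), 15));
	return 0;
}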
*/ - if (ar->target_type == TARGET_TYPE_AR6004) { - board_address = AR6004_REV1_BOARD_DATA_ADDRESS; + if (ar->hw.board_addr != 0) { + board_address = ar->hw.board_addr; ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_board_data)), @@ -1106,7 +1066,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) HI_ITEM(hi_board_ext_data)), (u8 *) &board_ext_address, 4); - if (board_ext_address == 0) { + if (ar->target_type == TARGET_TYPE_AR6003 && + board_ext_address == 0) { ath6kl_err("Failed to get board file target address.\n"); return -EINVAL; } @@ -1126,8 +1087,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) break; } - if (ar->fw_board_len == (board_data_size + - board_ext_data_size)) { + if (board_ext_address && + ar->fw_board_len == (board_data_size + board_ext_data_size)) { /* write extended board data */ ath6kl_dbg(ATH6KL_DBG_BOOT, @@ -1182,10 +1143,11 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) static int ath6kl_upload_otp(struct ath6kl *ar) { u32 address, param; + bool from_hw = false; int ret; - if (WARN_ON(ar->fw_otp == NULL)) - return -ENOENT; + if (ar->fw_otp == NULL) + return 0; address = ar->hw.app_load_addr; @@ -1210,15 +1172,20 @@ static int ath6kl_upload_otp(struct ath6kl *ar) return ret; } - ar->hw.app_start_override_addr = address; + if (ar->hw.app_start_override_addr == 0) { + ar->hw.app_start_override_addr = address; + from_hw = true; + } - ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n", + from_hw ? " (from hw)" : "", ar->hw.app_start_override_addr); /* execute the OTP code */ - ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address); + ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", + ar->hw.app_start_override_addr); param = 0; - ath6kl_bmi_execute(ar, address, ¶m); + ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, ¶m); return ret; } @@ -1229,7 +1196,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar) int ret; if (WARN_ON(ar->fw == NULL)) - return -ENOENT; + return 0; address = ar->hw.app_load_addr; @@ -1259,8 +1226,8 @@ static int ath6kl_upload_patch(struct ath6kl *ar) u32 address, param; int ret; - if (WARN_ON(ar->fw_patch == NULL)) - return -ENOENT; + if (ar->fw_patch == NULL) + return 0; address = ar->hw.dataset_patch_addr; @@ -1345,7 +1312,7 @@ static int ath6kl_init_upload(struct ath6kl *ar) return status; /* WAR to avoid SDIO CRC err */ - if (ar->version.target_ver == AR6003_REV2_VERSION) { + if (ar->version.target_ver == AR6003_HW_2_0_VERSION) { ath6kl_err("temporary war to avoid sdio crc error\n"); param = 0x20; @@ -1402,43 +1369,29 @@ static int ath6kl_init_upload(struct ath6kl *ar) if (status) return status; - /* Configure GPIO AR6003 UART */ - param = CONFIG_AR600x_DEBUG_UART_TX_PIN; - status = ath6kl_bmi_write(ar, - ath6kl_get_hi_item_addr(ar, - HI_ITEM(hi_dbg_uart_txpin)), - (u8 *)¶m, 4); - return status; } static int ath6kl_init_hw_params(struct ath6kl *ar) { - switch (ar->version.target_ver) { - case AR6003_REV2_VERSION: - ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS; - ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS; - ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS; - ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE; - break; - case AR6003_REV3_VERSION: - ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS; - ar->hw.app_load_addr = 0x1234; - ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS; - ar->hw.reserved_ram_size = 
AR6003_REV3_RAM_RESERVE_SIZE; - break; - case AR6004_REV1_VERSION: - ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS; - ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS; - ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS; - ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE; - break; - default: + const struct ath6kl_hw *hw; + int i; + + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + hw = &hw_list[i]; + + if (hw->id == ar->version.target_ver) + break; + } + + if (i == ARRAY_SIZE(hw_list)) { ath6kl_err("Unsupported hardware version: 0x%x\n", ar->version.target_ver); return -EINVAL; } + ar->hw = *hw; + ath6kl_dbg(ATH6KL_DBG_BOOT, "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n", ar->version.target_ver, ar->target_type, @@ -1447,75 +1400,75 @@ static int ath6kl_init_hw_params(struct ath6kl *ar) "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x", ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr, ar->hw.reserved_ram_size); + ath6kl_dbg(ATH6KL_DBG_BOOT, + "refclk_hz %d uarttx_pin %d", + ar->hw.refclk_hz, ar->hw.uarttx_pin); return 0; } -static int ath6kl_init(struct net_device *dev) +static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type) { - struct ath6kl *ar = ath6kl_priv(dev); - int status = 0; - s32 timeleft; + switch (type) { + case ATH6KL_HIF_TYPE_SDIO: + return "sdio"; + case ATH6KL_HIF_TYPE_USB: + return "usb"; + } - if (!ar) - return -EIO; + return NULL; +} + +int ath6kl_init_hw_start(struct ath6kl *ar) +{ + long timeleft; + int ret, i; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n"); + + ret = ath6kl_hif_power_on(ar); + if (ret) + return ret; + + ret = ath6kl_configure_target(ar); + if (ret) + goto err_power_off; + + ret = ath6kl_init_upload(ar); + if (ret) + goto err_power_off; /* Do we need to finish the BMI phase */ + /* FIXME: return error from ath6kl_bmi_done() */ if (ath6kl_bmi_done(ar)) { - status = -EIO; - goto ath6kl_init_done; - } - - /* Indicate that WMI is enabled (although not ready yet) */ - set_bit(WMI_ENABLED, &ar->flag); - ar->wmi = ath6kl_wmi_init(ar); - if (!ar->wmi) { - ath6kl_err("failed to initialize wmi\n"); - status = -EIO; - goto ath6kl_init_done; + ret = -EIO; + goto err_power_off; } - ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); - /* * The reason we have to wait for the target here is that the * driver layer has to init BMI in order to set the host block * size. 
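ath6kl_init_hw_params now resolves per-chip parameters by a linear search of the hw_list table instead of a switch on the target version. A trimmed-down sketch of the same table-driven lookup; the numeric ids and fields here are placeholders, not the real AR6003/AR6004 values:

#include <stdio.h>
#include <stdint.h>

struct hw_params {
	uint32_t id;
	const char *name;
	uint32_t refclk_hz;
};

static const struct hw_params hw_list[] = {
	{ 0x11111111, "example hw 2.0",   26000000 },   /* placeholder ids */
	{ 0x22222222, "example hw 2.1.1", 26000000 },
};

static const struct hw_params *find_hw(uint32_t target_ver)
{
	size_t i;

	for (i = 0; i < sizeof(hw_list) / sizeof(hw_list[0]); i++)
		if (hw_list[i].id == target_ver)
			return &hw_list[i];
	return NULL;   /* unsupported hardware version */
}

int main(void)
{
	const struct hw_params *hw = find_hw(0x22222222);

	printf("%s\n", hw ? hw->name : "unsupported");
	return 0;
}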
*/ if (ath6kl_htc_wait_target(ar->htc_target)) { - status = -EIO; - goto err_node_cleanup; + ret = -EIO; + goto err_power_off; } if (ath6kl_init_service_ep(ar)) { - status = -EIO; + ret = -EIO; goto err_cleanup_scatter; } - /* setup access class priority mappings */ - ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ - ar->ac_stream_pri_map[WMM_AC_BE] = 1; - ar->ac_stream_pri_map[WMM_AC_VI] = 2; - ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ - - /* give our connected endpoints some buffers */ - ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); - ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); - - /* allocate some buffers that handle larger AMSDU frames */ - ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); - /* setup credit distribution */ - ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info); - - ath6kl_cookie_init(ar); + ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info); /* start HTC */ - status = ath6kl_htc_start(ar->htc_target); - - if (status) { + ret = ath6kl_htc_start(ar->htc_target); + if (ret) { + /* FIXME: call this */ ath6kl_cookie_cleanup(ar); - goto err_rxbuf_cleanup; + goto err_cleanup_scatter; } /* Wait for Wmi event to be ready */ @@ -1526,54 +1479,81 @@ static int ath6kl_init(struct net_device *dev) ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); + + if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { + ath6kl_info("%s %s fw %s%s\n", + ar->hw.name, + ath6kl_init_get_hif_name(ar->hif_type), + ar->wiphy->fw_version, + test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); + } + if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n", ATH6KL_ABI_VERSION, ar->version.abi_ver); - status = -EIO; + ret = -EIO; goto err_htc_stop; } if (!timeleft || signal_pending(current)) { ath6kl_err("wmi is not ready or wait was interrupted\n"); - status = -EIO; + ret = -EIO; goto err_htc_stop; } ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); /* communicate the wmi protocol verision to the target */ + /* FIXME: return error */ if ((ath6kl_set_host_app_area(ar)) != 0) ath6kl_err("unable to set the host app area\n"); - ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | - ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; + for (i = 0; i < ar->vif_max; i++) { + ret = ath6kl_target_config_wlan_params(ar, i); + if (ret) + goto err_htc_stop; + } - ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; + ar->state = ATH6KL_STATE_ON; - status = ath6kl_target_config_wlan_params(ar); - if (!status) - goto ath6kl_init_done; + return 0; err_htc_stop: ath6kl_htc_stop(ar->htc_target); -err_rxbuf_cleanup: - ath6kl_htc_flush_rx_buf(ar->htc_target); - ath6kl_cleanup_amsdu_rxbufs(ar); err_cleanup_scatter: ath6kl_hif_cleanup_scatter(ar); -err_node_cleanup: - ath6kl_wmi_shutdown(ar->wmi); - clear_bit(WMI_ENABLED, &ar->flag); - ar->wmi = NULL; +err_power_off: + ath6kl_hif_power_off(ar); -ath6kl_init_done: - return status; + return ret; +} + +int ath6kl_init_hw_stop(struct ath6kl *ar) +{ + int ret; + + ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n"); + + ath6kl_htc_stop(ar->htc_target); + + ath6kl_hif_stop(ar); + + ath6kl_bmi_reset(ar); + + ret = ath6kl_hif_power_off(ar); + if (ret) + ath6kl_warn("failed to power off hif: %d\n", ret); + + ar->state = ATH6KL_STATE_OFF; + + return 0; } int ath6kl_core_init(struct ath6kl *ar) { - int ret = 0; struct ath6kl_bmi_target_info targ_info; + struct net_device *ndev; + int ret = 0, i; ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); if (!ar->ath6kl_wq) @@ 
-1583,145 +1563,236 @@ int ath6kl_core_init(struct ath6kl *ar) if (ret) goto err_wq; - ret = ath6kl_bmi_get_target_info(ar, &targ_info); + /* + * Turn on power to get hardware (target) version and leave power + * on delibrately as we will boot the hardware anyway within few + * seconds. + */ + ret = ath6kl_hif_power_on(ar); if (ret) goto err_bmi_cleanup; + ret = ath6kl_bmi_get_target_info(ar, &targ_info); + if (ret) + goto err_power_off; + ar->version.target_ver = le32_to_cpu(targ_info.version); ar->target_type = le32_to_cpu(targ_info.type); - ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version); + ar->wiphy->hw_version = le32_to_cpu(targ_info.version); ret = ath6kl_init_hw_params(ar); if (ret) - goto err_bmi_cleanup; - - ret = ath6kl_configure_target(ar); - if (ret) - goto err_bmi_cleanup; + goto err_power_off; ar->htc_target = ath6kl_htc_create(ar); if (!ar->htc_target) { ret = -ENOMEM; - goto err_bmi_cleanup; - } - - ar->aggr_cntxt = aggr_init(ar->net_dev); - if (!ar->aggr_cntxt) { - ath6kl_err("failed to initialize aggr\n"); - ret = -ENOMEM; - goto err_htc_cleanup; + goto err_power_off; } ret = ath6kl_fetch_firmwares(ar); if (ret) goto err_htc_cleanup; - ret = ath6kl_init_upload(ar); - if (ret) + /* FIXME: we should free all firmwares in the error cases below */ + + /* Indicate that WMI is enabled (although not ready yet) */ + set_bit(WMI_ENABLED, &ar->flag); + ar->wmi = ath6kl_wmi_init(ar); + if (!ar->wmi) { + ath6kl_err("failed to initialize wmi\n"); + ret = -EIO; goto err_htc_cleanup; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); - ret = ath6kl_init(ar->net_dev); + ret = ath6kl_register_ieee80211_hw(ar); if (ret) - goto err_htc_cleanup; + goto err_node_cleanup; - /* This runs the init function if registered */ - ret = register_netdev(ar->net_dev); + ret = ath6kl_debug_init(ar); if (ret) { - ath6kl_err("register_netdev failed\n"); - ath6kl_destroy(ar->net_dev, 0); - return ret; + wiphy_unregister(ar->wiphy); + goto err_node_cleanup; + } + + for (i = 0; i < ar->vif_max; i++) + ar->avail_idx_map |= BIT(i); + + rtnl_lock(); + + /* Add an initial station interface */ + ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0, + INFRA_NETWORK); + + rtnl_unlock(); + + if (!ndev) { + ath6kl_err("Failed to instantiate a network device\n"); + ret = -ENOMEM; + wiphy_unregister(ar->wiphy); + goto err_debug_init; } - set_bit(NETDEV_REGISTERED, &ar->flag); ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", - __func__, ar->net_dev->name, ar->net_dev, ar); + __func__, ndev->name, ndev, ar); + + /* setup access class priority mappings */ + ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ + ar->ac_stream_pri_map[WMM_AC_BE] = 1; + ar->ac_stream_pri_map[WMM_AC_VI] = 2; + ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ + + /* give our connected endpoints some buffers */ + ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); + ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); + + /* allocate some buffers that handle larger AMSDU frames */ + ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); + + ath6kl_cookie_init(ar); + + ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | + ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; + + if (suspend_cutpower) + ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER; + + ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | + WIPHY_FLAG_HAVE_AP_SME | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + + if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) 
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + + ar->wiphy->probe_resp_offload = + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U; + + set_bit(FIRST_BOOT, &ar->flag); + + ret = ath6kl_init_hw_start(ar); + if (ret) { + ath6kl_err("Failed to start hardware: %d\n", ret); + goto err_rxbuf_cleanup; + } + + /* + * Set mac address which is received in ready event + * FIXME: Move to ath6kl_interface_add() + */ + memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); return ret; +err_rxbuf_cleanup: + ath6kl_htc_flush_rx_buf(ar->htc_target); + ath6kl_cleanup_amsdu_rxbufs(ar); + rtnl_lock(); + ath6kl_deinit_if_data(netdev_priv(ndev)); + rtnl_unlock(); + wiphy_unregister(ar->wiphy); +err_debug_init: + ath6kl_debug_cleanup(ar); +err_node_cleanup: + ath6kl_wmi_shutdown(ar->wmi); + clear_bit(WMI_ENABLED, &ar->flag); + ar->wmi = NULL; err_htc_cleanup: ath6kl_htc_cleanup(ar->htc_target); +err_power_off: + ath6kl_hif_power_off(ar); err_bmi_cleanup: ath6kl_bmi_cleanup(ar); err_wq: destroy_workqueue(ar->ath6kl_wq); + return ret; } -void ath6kl_stop_txrx(struct ath6kl *ar) +void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready) { - struct net_device *ndev = ar->net_dev; + static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + bool discon_issued; - if (!ndev) - return; + netif_stop_queue(vif->ndev); - set_bit(DESTROY_IN_PROGRESS, &ar->flag); + clear_bit(WLAN_ENABLED, &vif->flags); - if (down_interruptible(&ar->sem)) { - ath6kl_err("down_interruptible failed\n"); - return; - } + if (wmi_ready) { + discon_issued = test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags); + ath6kl_disconnect(vif); + del_timer(&vif->disconnect_timer); - if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR) - ath6kl_stop_endpoint(ndev, false, true); + if (discon_issued) + ath6kl_disconnect_event(vif, DISCONNECT_CMD, + (vif->nw_type & AP_NETWORK) ? + bcast_mac : vif->bssid, + 0, NULL, 0); + } - clear_bit(WLAN_ENABLED, &ar->flag); + if (vif->scan_req) { + cfg80211_scan_done(vif->scan_req, true); + vif->scan_req = NULL; + } } -/* - * We need to differentiate between the surprise and planned removal of the - * device because of the following consideration: - * - * - In case of surprise removal, the hcd already frees up the pending - * for the device and hence there is no need to unregister the function - * driver inorder to get these requests. For planned removal, the function - * driver has to explicitly unregister itself to have the hcd return all the - * pending requests before the data structures for the devices are freed up. - * Note that as per the current implementation, the function driver will - * end up releasing all the devices since there is no API to selectively - * release a particular device. - * - * - Certain commands issued to the target can be skipped for surprise - * removal since they will anyway not go through. 
- */ -void ath6kl_destroy(struct net_device *dev, unsigned int unregister) +void ath6kl_stop_txrx(struct ath6kl *ar) { - struct ath6kl *ar; + struct ath6kl_vif *vif, *tmp_vif; - if (!dev || !ath6kl_priv(dev)) { - ath6kl_err("failed to get device structure\n"); + set_bit(DESTROY_IN_PROGRESS, &ar->flag); + + if (down_interruptible(&ar->sem)) { + ath6kl_err("down_interruptible failed\n"); return; } - ar = ath6kl_priv(dev); - - destroy_workqueue(ar->ath6kl_wq); - - if (ar->htc_target) - ath6kl_htc_cleanup(ar->htc_target); - - aggr_module_destroy(ar->aggr_cntxt); - - ath6kl_cookie_cleanup(ar); - - ath6kl_cleanup_amsdu_rxbufs(ar); + spin_lock_bh(&ar->list_lock); + list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) { + list_del(&vif->list); + spin_unlock_bh(&ar->list_lock); + ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag)); + rtnl_lock(); + ath6kl_deinit_if_data(vif); + rtnl_unlock(); + spin_lock_bh(&ar->list_lock); + } + spin_unlock_bh(&ar->list_lock); - ath6kl_bmi_cleanup(ar); + clear_bit(WMI_READY, &ar->flag); - ath6kl_debug_cleanup(ar); + /* + * After wmi_shudown all WMI events will be dropped. We + * need to cleanup the buffers allocated in AP mode and + * give disconnect notification to stack, which usually + * happens in the disconnect_event. Simulate the disconnect + * event by calling the function directly. Sometimes + * disconnect_event will be received when the debug logs + * are collected. + */ + ath6kl_wmi_shutdown(ar->wmi); - if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) { - unregister_netdev(dev); - clear_bit(NETDEV_REGISTERED, &ar->flag); + clear_bit(WMI_ENABLED, &ar->flag); + if (ar->htc_target) { + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__); + ath6kl_htc_stop(ar->htc_target); } - free_netdev(dev); - - kfree(ar->fw_board); - kfree(ar->fw_otp); - kfree(ar->fw); - kfree(ar->fw_patch); + /* + * Try to reset the device if we can. The driver may have been + * configure NOT to reset the target during a debug session. + */ + ath6kl_dbg(ATH6KL_DBG_TRC, + "attempting to reset target on instance destroy\n"); + ath6kl_reset_device(ar, ar->target_type, true, true); - ath6kl_cfg80211_deinit(ar); + clear_bit(WLAN_ENABLED, &ar->flag); } diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 30b5a53db9e..eea3c747653 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -20,12 +20,13 @@ #include "target.h" #include "debug.h" -struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr) +struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr) { + struct ath6kl *ar = vif->ar; struct ath6kl_sta *conn = NULL; u8 i, max_conn; - max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; + max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; for (i = 0; i < max_conn; i++) { if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { @@ -174,64 +175,6 @@ void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie) ar->cookie_count++; } -/* set the window address register (using 4-byte register access ). */ -static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) -{ - int status; - s32 i; - __le32 addr_val; - - /* - * Write bytes 1,2,3 of the register to set the upper address bytes, - * the LSB is written last to initiate the access cycle - */ - - for (i = 1; i <= 3; i++) { - /* - * Fill the buffer with the address byte value we want to - * hit 4 times. 
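The reworked ath6kl_stop_txrx above unlinks each vif from ar->vif_list while holding list_lock, then drops the lock around the per-vif teardown because that work (rtnl_lock, netdev removal) can sleep. A userspace analogue of the unlink-then-unlock pattern, with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vif {
	int idx;
	struct vif *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vif *vif_list;

/* stand-in for teardown work that may block (rtnl_lock, unregister, ...) */
static void teardown(struct vif *v)
{
	printf("tearing down vif %d\n", v->idx);
	free(v);
}

static void stop_all_vifs(void)
{
	pthread_mutex_lock(&list_lock);
	while (vif_list) {
		struct vif *v = vif_list;

		vif_list = v->next;              /* unlink while holding the lock */
		pthread_mutex_unlock(&list_lock);
		teardown(v);                     /* blocking work done unlocked */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct vif *v = malloc(sizeof(*v));

		v->idx = i;
		v->next = vif_list;
		vif_list = v;
	}
	stop_all_vifs();
	return 0;
}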
No need to worry about endianness as the - * same byte is copied to all four bytes of addr_val at - * any time. - */ - memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4); - - /* - * Hit each byte of the register address with a 4-byte - * write operation to the same address, this is a harmless - * operation. - */ - status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val, - 4, HIF_WR_SYNC_BYTE_FIX); - if (status) - break; - } - - if (status) { - ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n", - addr, reg_addr); - return status; - } - - /* - * Write the address register again, this time write the whole - * 4-byte value. The effect here is that the LSB write causes the - * cycle to start, the extra 3 byte write to bytes 1,2,3 has no - * effect since we are writing the same values again - */ - addr_val = cpu_to_le32(addr); - status = hif_read_write_sync(ar, reg_addr, - (u8 *)&(addr_val), - 4, HIF_WR_SYNC_BYTE_INC); - - if (status) { - ath6kl_err("failed to write 0x%x to window reg: 0x%X\n", - addr, reg_addr); - return status; - } - - return 0; -} - /* * Read from the hardware through its diagnostic window. No cooperation * from the firmware is required for this. @@ -240,14 +183,7 @@ int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value) { int ret; - /* set window register to start read cycle */ - ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address); - if (ret) - return ret; - - /* read the data */ - ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value, - sizeof(*value), HIF_RD_SYNC_BYTE_INC); + ret = ath6kl_hif_diag_read32(ar, address, value); if (ret) { ath6kl_warn("failed to read32 through diagnose window: %d\n", ret); @@ -265,18 +201,15 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) { int ret; - /* set write data */ - ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value, - sizeof(value), HIF_WR_SYNC_BYTE_INC); + ret = ath6kl_hif_diag_write32(ar, address, value); + if (ret) { ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n", address, value); return ret; } - /* set window register, which starts the write cycle */ - return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, - address); + return 0; } int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length) @@ -393,8 +326,8 @@ out: #define AR6003_RESET_CONTROL_ADDRESS 0x00004000 #define AR6004_RESET_CONTROL_ADDRESS 0x00004000 -static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, - bool wait_fot_compltn, bool cold_reset) +void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, + bool wait_fot_compltn, bool cold_reset) { int status = 0; u32 address; @@ -425,102 +358,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, ath6kl_err("failed to reset target\n"); } -void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile, - bool get_dbglogs) -{ - struct ath6kl *ar = ath6kl_priv(dev); - static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - bool discon_issued; - - netif_stop_queue(dev); - - /* disable the target and the interrupts associated with it */ - if (test_bit(WMI_READY, &ar->flag)) { - discon_issued = (test_bit(CONNECTED, &ar->flag) || - test_bit(CONNECT_PEND, &ar->flag)); - ath6kl_disconnect(ar); - if (!keep_profile) - ath6kl_init_profile_info(ar); - - del_timer(&ar->disconnect_timer); - - clear_bit(WMI_READY, &ar->flag); - ath6kl_wmi_shutdown(ar->wmi); - clear_bit(WMI_ENABLED, &ar->flag); - ar->wmi = NULL; - - /* - * After wmi_shudown all WMI 
events will be dropped. We - * need to cleanup the buffers allocated in AP mode and - * give disconnect notification to stack, which usually - * happens in the disconnect_event. Simulate the disconnect - * event by calling the function directly. Sometimes - * disconnect_event will be received when the debug logs - * are collected. - */ - if (discon_issued) - ath6kl_disconnect_event(ar, DISCONNECT_CMD, - (ar->nw_type & AP_NETWORK) ? - bcast_mac : ar->bssid, - 0, NULL, 0); - - ar->user_key_ctrl = 0; - - } else { - ath6kl_dbg(ATH6KL_DBG_TRC, - "%s: wmi is not ready 0x%p 0x%p\n", - __func__, ar, ar->wmi); - - /* Shut down WMI if we have started it */ - if (test_bit(WMI_ENABLED, &ar->flag)) { - ath6kl_dbg(ATH6KL_DBG_TRC, - "%s: shut down wmi\n", __func__); - ath6kl_wmi_shutdown(ar->wmi); - clear_bit(WMI_ENABLED, &ar->flag); - ar->wmi = NULL; - } - } - - if (ar->htc_target) { - ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__); - ath6kl_htc_stop(ar->htc_target); - } - - /* - * Try to reset the device if we can. The driver may have been - * configure NOT to reset the target during a debug session. - */ - ath6kl_dbg(ATH6KL_DBG_TRC, - "attempting to reset target on instance destroy\n"); - ath6kl_reset_device(ar, ar->target_type, true, true); -} - -static void ath6kl_install_static_wep_keys(struct ath6kl *ar) +static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif) { u8 index; u8 keyusage; for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) { - if (ar->wep_key_list[index].key_len) { + if (vif->wep_key_list[index].key_len) { keyusage = GROUP_USAGE; - if (index == ar->def_txkey_index) + if (index == vif->def_txkey_index) keyusage |= TX_USAGE; - ath6kl_wmi_addkey_cmd(ar->wmi, + ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx, index, WEP_CRYPT, keyusage, - ar->wep_key_list[index].key_len, - NULL, - ar->wep_key_list[index].key, + vif->wep_key_list[index].key_len, + NULL, 0, + vif->wep_key_list[index].key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } } } -void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) +void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) { + struct ath6kl *ar = vif->ar; struct ath6kl_req_key *ik; int res; u8 key_rsc[ATH6KL_KEY_SEQ_LEN]; @@ -529,11 +393,13 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel); - switch (ar->auth_mode) { + switch (vif->auth_mode) { case NONE_AUTH: - if (ar->prwise_crypto == WEP_CRYPT) - ath6kl_install_static_wep_keys(ar); - break; + if (vif->prwise_crypto == WEP_CRYPT) + ath6kl_install_static_wep_keys(vif); + if (!ik->valid || ik->key_type != WAPI_CRYPT) + break; + /* for WAPI, we need to set the delayed group key, continue: */ case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH | WPA2_PSK_AUTH): @@ -544,8 +410,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) "the initial group key for AP mode\n"); memset(key_rsc, 0, sizeof(key_rsc)); res = ath6kl_wmi_addkey_cmd( - ar->wmi, ik->key_index, ik->key_type, - GROUP_USAGE, ik->key_len, key_rsc, ik->key, + ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type, + GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN, + ik->key, KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); if (res) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed " @@ -554,15 +421,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) break; } - ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); - set_bit(CONNECTED, &ar->flag); - 
netif_carrier_on(ar->net_dev); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); + set_bit(CONNECTED, &vif->flags); + netif_carrier_on(vif->ndev); } -void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, +void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, u8 keymgmt, u8 ucipher, u8 auth, u8 assoc_req_len, u8 *assoc_info) { + struct ath6kl *ar = vif->ar; u8 *ies = NULL, *wpa_ie = NULL, *pos; size_t ies_len = 0; struct station_info sinfo; @@ -600,6 +468,18 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, wpa_ie = pos; /* WPS IE */ break; /* overrides WPA/RSN IE */ } + } else if (pos[0] == 0x44 && wpa_ie == NULL) { + /* + * Note: WAPI Parameter Set IE re-uses Element ID that + * was officially allocated for BSS AC Access Delay. As + * such, we need to be a bit more careful on when + * parsing the frame. However, BSS AC Access Delay + * element is not supposed to be included in + * (Re)Association Request frames, so this should not + * cause problems. + */ + wpa_ie = pos; /* WAPI IE */ + break; } pos += 2 + pos[1]; } @@ -617,380 +497,49 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, sinfo.assoc_req_ies_len = ies_len; sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; - cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL); - - netif_wake_queue(ar->net_dev); -} - -/* Functions for Tx credit handling */ -void ath6k_credit_init(struct htc_credit_state_info *cred_info, - struct list_head *ep_list, - int tot_credits) -{ - struct htc_endpoint_credit_dist *cur_ep_dist; - int count; - - cred_info->cur_free_credits = tot_credits; - cred_info->total_avail_credits = tot_credits; - - list_for_each_entry(cur_ep_dist, ep_list, list) { - if (cur_ep_dist->endpoint == ENDPOINT_0) - continue; - - cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; - - if (tot_credits > 4) - if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || - (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { - ath6kl_deposit_credit_to_ep(cred_info, - cur_ep_dist, - cur_ep_dist->cred_min); - cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; - } - - if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { - ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist, - cur_ep_dist->cred_min); - /* - * Control service is always marked active, it - * never goes inactive EVER. - */ - cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; - } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC) - /* this is the lowest priority data endpoint */ - cred_info->lowestpri_ep_dist = cur_ep_dist->list; - - /* - * Streams have to be created (explicit | implicit) for all - * kinds of traffic. BE endpoints are also inactive in the - * beginning. When BE traffic starts it creates implicit - * streams that redistributes credits. - * - * Note: all other endpoints have minimums set but are - * initially given NO credits. credits will be distributed - * as traffic activity demands - */ - } - - WARN_ON(cred_info->cur_free_credits <= 0); - - list_for_each_entry(cur_ep_dist, ep_list, list) { - if (cur_ep_dist->endpoint == ENDPOINT_0) - continue; - - if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) - cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; - else { - /* - * For the remaining data endpoints, we assume that - * each cred_per_msg are the same. We use a simple - * calculation here, we take the remaining credits - * and determine how many max messages this can - * cover and then set each endpoint's normal value - * equal to 3/4 this amount. 
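The association-request parsing above walks 802.11 information elements with pos += 2 + pos[1] and, per the new comment, treats element ID 0x44 as a WAPI Parameter Set IE when no WPA/RSN/WPS IE was found first. A self-contained sketch of that IE walk (the sample buffer is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Walk 802.11 information elements: each is | id | len | len bytes of data |. */
static const uint8_t *find_ie(const uint8_t *ies, size_t ies_len, uint8_t id)
{
	const uint8_t *pos = ies;
	const uint8_t *end = ies + ies_len;

	while (pos + 2 <= end && pos + 2 + pos[1] <= end) {
		if (pos[0] == id)
			return pos;
		pos += 2 + pos[1];   /* skip header plus payload */
	}
	return NULL;
}

int main(void)
{
	/* one vendor-specific IE (id 221) followed by an element with id 0x44 */
	uint8_t ies[] = { 221, 4, 0x00, 0x50, 0xf2, 0x01, 0x44, 2, 0xaa, 0xbb };
	const uint8_t *wapi = find_ie(ies, sizeof(ies), 0x44);

	printf("0x44 IE %sfound\n", wapi ? "" : "not ");
	return 0;
}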
- */ - count = (cred_info->cur_free_credits / - cur_ep_dist->cred_per_msg) - * cur_ep_dist->cred_per_msg; - count = (count * 3) >> 2; - count = max(count, cur_ep_dist->cred_per_msg); - cur_ep_dist->cred_norm = count; - - } - } -} - -/* initialize and setup credit distribution */ -int ath6k_setup_credit_dist(void *htc_handle, - struct htc_credit_state_info *cred_info) -{ - u16 servicepriority[5]; - - memset(cred_info, 0, sizeof(struct htc_credit_state_info)); - - servicepriority[0] = WMI_CONTROL_SVC; /* highest */ - servicepriority[1] = WMI_DATA_VO_SVC; - servicepriority[2] = WMI_DATA_VI_SVC; - servicepriority[3] = WMI_DATA_BE_SVC; - servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ - - /* set priority list */ - ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); - - return 0; -} - -/* reduce an ep's credits back to a set limit */ -static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info, - struct htc_endpoint_credit_dist *ep_dist, - int limit) -{ - int credits; - - ep_dist->cred_assngd = limit; - - if (ep_dist->credits <= limit) - return; + cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL); - credits = ep_dist->credits - limit; - ep_dist->credits -= credits; - cred_info->cur_free_credits += credits; -} - -static void ath6k_credit_update(struct htc_credit_state_info *cred_info, - struct list_head *epdist_list) -{ - struct htc_endpoint_credit_dist *cur_dist_list; - - list_for_each_entry(cur_dist_list, epdist_list, list) { - if (cur_dist_list->endpoint == ENDPOINT_0) - continue; - - if (cur_dist_list->cred_to_dist > 0) { - cur_dist_list->credits += - cur_dist_list->cred_to_dist; - cur_dist_list->cred_to_dist = 0; - if (cur_dist_list->credits > - cur_dist_list->cred_assngd) - ath6k_reduce_credits(cred_info, - cur_dist_list, - cur_dist_list->cred_assngd); - - if (cur_dist_list->credits > - cur_dist_list->cred_norm) - ath6k_reduce_credits(cred_info, cur_dist_list, - cur_dist_list->cred_norm); - - if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) { - if (cur_dist_list->txq_depth == 0) - ath6k_reduce_credits(cred_info, - cur_dist_list, 0); - } - } - } -} - -/* - * HTC has an endpoint that needs credits, ep_dist is the endpoint in - * question. - */ -void ath6k_seek_credits(struct htc_credit_state_info *cred_info, - struct htc_endpoint_credit_dist *ep_dist) -{ - struct htc_endpoint_credit_dist *curdist_list; - int credits = 0; - int need; - - if (ep_dist->svc_id == WMI_CONTROL_SVC) - goto out; - - if ((ep_dist->svc_id == WMI_DATA_VI_SVC) || - (ep_dist->svc_id == WMI_DATA_VO_SVC)) - if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) - goto out; - - /* - * For all other services, we follow a simple algorithm of: - * - * 1. checking the free pool for credits - * 2. checking lower priority endpoints for credits to take - */ - - credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); - - if (credits >= ep_dist->seek_cred) - goto out; - - /* - * We don't have enough in the free pool, try taking away from - * lower priority services The rule for taking away credits: - * - * 1. Only take from lower priority endpoints - * 2. Only take what is allocated above the minimum (never - * starve an endpoint completely) - * 3. Only take what you need. 
- */ - - list_for_each_entry_reverse(curdist_list, - &cred_info->lowestpri_ep_dist, - list) { - if (curdist_list == ep_dist) - break; - - need = ep_dist->seek_cred - cred_info->cur_free_credits; - - if ((curdist_list->cred_assngd - need) >= - curdist_list->cred_min) { - /* - * The current one has been allocated more than - * it's minimum and it has enough credits assigned - * above it's minimum to fulfill our need try to - * take away just enough to fulfill our need. - */ - ath6k_reduce_credits(cred_info, curdist_list, - curdist_list->cred_assngd - need); - - if (cred_info->cur_free_credits >= - ep_dist->seek_cred) - break; - } - - if (curdist_list->endpoint == ENDPOINT_0) - break; - } - - credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); - -out: - /* did we find some credits? */ - if (credits) - ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits); - - ep_dist->seek_cred = 0; -} - -/* redistribute credits based on activity change */ -static void ath6k_redistribute_credits(struct htc_credit_state_info *info, - struct list_head *ep_dist_list) -{ - struct htc_endpoint_credit_dist *curdist_list; - - list_for_each_entry(curdist_list, ep_dist_list, list) { - if (curdist_list->endpoint == ENDPOINT_0) - continue; - - if ((curdist_list->svc_id == WMI_DATA_BK_SVC) || - (curdist_list->svc_id == WMI_DATA_BE_SVC)) - curdist_list->dist_flags |= HTC_EP_ACTIVE; - - if ((curdist_list->svc_id != WMI_CONTROL_SVC) && - !(curdist_list->dist_flags & HTC_EP_ACTIVE)) { - if (curdist_list->txq_depth == 0) - ath6k_reduce_credits(info, - curdist_list, 0); - else - ath6k_reduce_credits(info, - curdist_list, - curdist_list->cred_min); - } - } -} - -/* - * - * This function is invoked whenever endpoints require credit - * distributions. A lock is held while this function is invoked, this - * function shall NOT block. The ep_dist_list is a list of distribution - * structures in prioritized order as defined by the call to the - * htc_set_credit_dist() api. - */ -void ath6k_credit_distribute(struct htc_credit_state_info *cred_info, - struct list_head *ep_dist_list, - enum htc_credit_dist_reason reason) -{ - switch (reason) { - case HTC_CREDIT_DIST_SEND_COMPLETE: - ath6k_credit_update(cred_info, ep_dist_list); - break; - case HTC_CREDIT_DIST_ACTIVITY_CHANGE: - ath6k_redistribute_credits(cred_info, ep_dist_list); - break; - default: - break; - } - - WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits); - WARN_ON(cred_info->cur_free_credits < 0); + netif_wake_queue(vif->ndev); } void disconnect_timer_handler(unsigned long ptr) { struct net_device *dev = (struct net_device *)ptr; - struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - ath6kl_init_profile_info(ar); - ath6kl_disconnect(ar); + ath6kl_init_profile_info(vif); + ath6kl_disconnect(vif); } -void ath6kl_disconnect(struct ath6kl *ar) +void ath6kl_disconnect(struct ath6kl_vif *vif) { - if (test_bit(CONNECTED, &ar->flag) || - test_bit(CONNECT_PEND, &ar->flag)) { - ath6kl_wmi_disconnect_cmd(ar->wmi); + if (test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags)) { + ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); /* * Disconnect command is issued, clear the connect pending * flag. The connected flag will be cleared in * disconnect event notification. 
*/ - clear_bit(CONNECT_PEND, &ar->flag); + clear_bit(CONNECT_PEND, &vif->flags); } } -void ath6kl_deep_sleep_enable(struct ath6kl *ar) -{ - switch (ar->sme_state) { - case SME_CONNECTING: - cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0, - NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - GFP_KERNEL); - break; - case SME_CONNECTED: - default: - /* - * FIXME: oddly enough smeState is in DISCONNECTED during - * suspend, why? Need to send disconnected event in that - * state. - */ - cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL); - break; - } - - if (test_bit(CONNECTED, &ar->flag) || - test_bit(CONNECT_PEND, &ar->flag)) - ath6kl_wmi_disconnect_cmd(ar->wmi); - - ar->sme_state = SME_DISCONNECTED; - - /* disable scanning */ - if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0, - 0, 0) != 0) - printk(KERN_WARNING "ath6kl: failed to disable scan " - "during suspend\n"); - - ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED); -} - /* WMI Event handlers */ -static const char *get_hw_id_string(u32 id) -{ - switch (id) { - case AR6003_REV1_VERSION: - return "1.0"; - case AR6003_REV2_VERSION: - return "2.0"; - case AR6003_REV3_VERSION: - return "2.1.1"; - default: - return "unknown"; - } -} - void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) { struct ath6kl *ar = devt; - struct net_device *dev = ar->net_dev; - memcpy(dev->dev_addr, datap, ETH_ALEN); + memcpy(ar->mac_addr, datap, ETH_ALEN); ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", - __func__, dev->dev_addr); + __func__, ar->mac_addr); ar->version.wlan_ver = sw_ver; ar->version.abi_ver = abi_ver; - snprintf(ar->wdev->wiphy->fw_version, - sizeof(ar->wdev->wiphy->fw_version), + snprintf(ar->wiphy->fw_version, + sizeof(ar->wiphy->fw_version), "%u.%u.%u.%u", (ar->version.wlan_ver & 0xf0000000) >> 28, (ar->version.wlan_ver & 0x0f000000) >> 24, @@ -1000,79 +549,85 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) /* indicate to the waiting thread that the ready event was received */ set_bit(WMI_READY, &ar->flag); wake_up(&ar->event_wq); - - ath6kl_info("hw %s fw %s%s\n", - get_hw_id_string(ar->wdev->wiphy->hw_version), - ar->wdev->wiphy->fw_version, - test_bit(TESTMODE, &ar->flag) ? 
" testmode" : ""); } -void ath6kl_scan_complete_evt(struct ath6kl *ar, int status) +void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status) { - ath6kl_cfg80211_scan_complete_event(ar, status); + struct ath6kl *ar = vif->ar; + bool aborted = false; + + if (status != WMI_SCAN_STATUS_SUCCESS) + aborted = true; + + ath6kl_cfg80211_scan_complete_event(vif, aborted); if (!ar->usr_bss_filter) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); - ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + NONE_BSS_FILTER, 0); } - ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status); } -void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, +void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, u16 listen_int, u16 beacon_int, enum network_type net_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info) { - unsigned long flags; + struct ath6kl *ar = vif->ar; - ath6kl_cfg80211_connect_event(ar, channel, bssid, + ath6kl_cfg80211_connect_event(vif, channel, bssid, listen_int, beacon_int, net_type, beacon_ie_len, assoc_req_len, assoc_resp_len, assoc_info); - memcpy(ar->bssid, bssid, sizeof(ar->bssid)); - ar->bss_ch = channel; + memcpy(vif->bssid, bssid, sizeof(vif->bssid)); + vif->bss_ch = channel; - if ((ar->nw_type == INFRA_NETWORK)) - ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t, + if ((vif->nw_type == INFRA_NETWORK)) + ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, + ar->listen_intvl_t, ar->listen_intvl_b); - netif_wake_queue(ar->net_dev); + netif_wake_queue(vif->ndev); /* Update connect & link status atomically */ - spin_lock_irqsave(&ar->lock, flags); - set_bit(CONNECTED, &ar->flag); - clear_bit(CONNECT_PEND, &ar->flag); - netif_carrier_on(ar->net_dev); - spin_unlock_irqrestore(&ar->lock, flags); + spin_lock_bh(&vif->if_lock); + set_bit(CONNECTED, &vif->flags); + clear_bit(CONNECT_PEND, &vif->flags); + netif_carrier_on(vif->ndev); + spin_unlock_bh(&vif->if_lock); - aggr_reset_state(ar->aggr_cntxt); - ar->reconnect_flag = 0; + aggr_reset_state(vif->aggr_cntxt); + vif->reconnect_flag = 0; - if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { + if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { memset(ar->node_map, 0, sizeof(ar->node_map)); ar->node_num = 0; ar->next_ep_id = ENDPOINT_2; } if (!ar->usr_bss_filter) { - set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); - ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0); + set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + CURRENT_BSS_FILTER, 0); } } -void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast) +void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast) { struct ath6kl_sta *sta; + struct ath6kl *ar = vif->ar; u8 tsc[6]; + /* * For AP case, keyid will have aid of STA which sent pkt with * MIC error. Use this aid to get MAC & send it to hostapd. 
*/ - if (ar->nw_type == AP_NETWORK) { + if (vif->nw_type == AP_NETWORK) { sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); if (!sta) return; @@ -1081,19 +636,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast) "ap tkip mic error received from aid=%d\n", keyid); memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */ - cfg80211_michael_mic_failure(ar->net_dev, sta->mac, + cfg80211_michael_mic_failure(vif->ndev, sta->mac, NL80211_KEYTYPE_PAIRWISE, keyid, tsc, GFP_KERNEL); } else - ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast); + ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); } -static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len) +static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) { struct wmi_target_stats *tgt_stats = (struct wmi_target_stats *) ptr; - struct target_stats *stats = &ar->target_stats; + struct ath6kl *ar = vif->ar; + struct target_stats *stats = &vif->target_stats; struct tkip_ccmp_stats *ccmp_stats; u8 ac; @@ -1189,8 +745,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len) stats->wow_evt_discarded += le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); - if (test_bit(STATS_UPDATE_PEND, &ar->flag)) { - clear_bit(STATS_UPDATE_PEND, &ar->flag); + if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { + clear_bit(STATS_UPDATE_PEND, &vif->flags); wake_up(&ar->event_wq); } } @@ -1200,14 +756,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val) *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val)); } -void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len) +void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) { struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr; + struct ath6kl *ar = vif->ar; struct wmi_ap_mode_stat *ap = &ar->ap_stats; struct wmi_per_sta_stat *st_ap, *st_p; u8 ac; - if (ar->nw_type == AP_NETWORK) { + if (vif->nw_type == AP_NETWORK) { if (len < sizeof(*p)) return; @@ -1226,7 +783,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len) } } else { - ath6kl_update_target_stats(ar, ptr, len); + ath6kl_update_target_stats(vif, ptr, len); } } @@ -1245,11 +802,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr) wake_up(&ar->event_wq); } -void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid) +void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid) { struct ath6kl_sta *conn; struct sk_buff *skb; bool psq_empty = false; + struct ath6kl *ar = vif->ar; conn = ath6kl_find_sta_by_aid(ar, aid); @@ -1272,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid) spin_unlock_bh(&conn->psq_lock); conn->sta_flags |= STA_PS_POLLED; - ath6kl_data_tx(skb, ar->net_dev); + ath6kl_data_tx(skb, vif->ndev); conn->sta_flags &= ~STA_PS_POLLED; spin_lock_bh(&conn->psq_lock); @@ -1280,13 +838,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid) spin_unlock_bh(&conn->psq_lock); if (psq_empty) - ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0); } -void ath6kl_dtimexpiry_event(struct ath6kl *ar) +void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif) { bool mcastq_empty = false; struct sk_buff *skb; + struct ath6kl *ar = vif->ar; /* * If there are no associated STAs, ignore the DTIM expiry event. 
@@ -1308,31 +867,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar) return; /* set the STA flag to dtim_expired for the frame to go out */ - set_bit(DTIM_EXPIRED, &ar->flag); + set_bit(DTIM_EXPIRED, &vif->flags); spin_lock_bh(&ar->mcastpsq_lock); while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { spin_unlock_bh(&ar->mcastpsq_lock); - ath6kl_data_tx(skb, ar->net_dev); + ath6kl_data_tx(skb, vif->ndev); spin_lock_bh(&ar->mcastpsq_lock); } spin_unlock_bh(&ar->mcastpsq_lock); - clear_bit(DTIM_EXPIRED, &ar->flag); + clear_bit(DTIM_EXPIRED, &vif->flags); /* clear the LSB of the BitMapCtl field of the TIM IE */ - ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0); } -void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, +void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 prot_reason_status) { - unsigned long flags; + struct ath6kl *ar = vif->ar; - if (ar->nw_type == AP_NETWORK) { + if (vif->nw_type == AP_NETWORK) { if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) return; @@ -1344,31 +903,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, /* clear the LSB of the TIM IE's BitMapCtl field */ if (test_bit(WMI_READY, &ar->flag)) - ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, + MCAST_AID, 0); } if (!is_broadcast_ether_addr(bssid)) { /* send event to application */ - cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL); + cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL); } - if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) { - memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); - clear_bit(CONNECTED, &ar->flag); + if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) { + memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); + clear_bit(CONNECTED, &vif->flags); } return; } - ath6kl_cfg80211_disconnect_event(ar, reason, bssid, + ath6kl_cfg80211_disconnect_event(vif, reason, bssid, assoc_resp_len, assoc_info, prot_reason_status); - aggr_reset_state(ar->aggr_cntxt); + aggr_reset_state(vif->aggr_cntxt); - del_timer(&ar->disconnect_timer); + del_timer(&vif->disconnect_timer); - ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT, - "disconnect reason is %d\n", reason); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason); /* * If the event is due to disconnect cmd from the host, only they @@ -1377,83 +936,88 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, */ if (reason == DISCONNECT_CMD) { if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) - ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + NONE_BSS_FILTER, 0); } else { - set_bit(CONNECT_PEND, &ar->flag); + set_bit(CONNECT_PEND, &vif->flags); if (((reason == ASSOC_FAILED) && (prot_reason_status == 0x11)) || ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) - && (ar->reconnect_flag == 1))) { - set_bit(CONNECTED, &ar->flag); + && (vif->reconnect_flag == 1))) { + set_bit(CONNECTED, &vif->flags); return; } } /* update connect & link status atomically */ - spin_lock_irqsave(&ar->lock, flags); - clear_bit(CONNECTED, &ar->flag); - netif_carrier_off(ar->net_dev); - spin_unlock_irqrestore(&ar->lock, flags); + spin_lock_bh(&vif->if_lock); + clear_bit(CONNECTED, &vif->flags); + netif_carrier_off(vif->ndev); + spin_unlock_bh(&vif->if_lock); - if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1)) - ar->reconnect_flag = 0; + if 
((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1)) + vif->reconnect_flag = 0; if (reason != CSERV_DISCONNECT) ar->user_key_ctrl = 0; - netif_stop_queue(ar->net_dev); - memset(ar->bssid, 0, sizeof(ar->bssid)); - ar->bss_ch = 0; + netif_stop_queue(vif->ndev); + memset(vif->bssid, 0, sizeof(vif->bssid)); + vif->bss_ch = 0; ath6kl_tx_data_cleanup(ar); } -static int ath6kl_open(struct net_device *dev) +struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar) { - struct ath6kl *ar = ath6kl_priv(dev); - unsigned long flags; + struct ath6kl_vif *vif; - spin_lock_irqsave(&ar->lock, flags); + spin_lock_bh(&ar->list_lock); + if (list_empty(&ar->vif_list)) { + spin_unlock_bh(&ar->list_lock); + return NULL; + } - set_bit(WLAN_ENABLED, &ar->flag); + vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list); - if (test_bit(CONNECTED, &ar->flag)) { + spin_unlock_bh(&ar->list_lock); + + return vif; +} + +static int ath6kl_open(struct net_device *dev) +{ + struct ath6kl_vif *vif = netdev_priv(dev); + + set_bit(WLAN_ENABLED, &vif->flags); + + if (test_bit(CONNECTED, &vif->flags)) { netif_carrier_on(dev); netif_wake_queue(dev); } else netif_carrier_off(dev); - spin_unlock_irqrestore(&ar->lock, flags); - return 0; } static int ath6kl_close(struct net_device *dev) { - struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); netif_stop_queue(dev); - ath6kl_disconnect(ar); - - if (test_bit(WMI_READY, &ar->flag)) { - if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, - 0, 0, 0)) - return -EIO; - - clear_bit(WLAN_ENABLED, &ar->flag); - } + ath6kl_cfg80211_stop(vif); - ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED); + clear_bit(WLAN_ENABLED, &vif->flags); return 0; } static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) { - struct ath6kl *ar = ath6kl_priv(dev); + struct ath6kl_vif *vif = netdev_priv(dev); - return &ar->net_stats; + return &vif->net_stats; } static struct net_device_ops ath6kl_netdev_ops = { @@ -1466,6 +1030,7 @@ static struct net_device_ops ath6kl_netdev_ops = { void init_netdev(struct net_device *dev) { dev->netdev_ops = &ath6kl_netdev_ops; + dev->destructor = free_netdev; dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; dev->needed_headroom = ETH_HLEN; diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 066d4f88807..9475e2d0d0b 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -22,7 +22,7 @@ #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sd.h> -#include "htc_hif.h" +#include "hif.h" #include "hif-ops.h" #include "target.h" #include "debug.h" @@ -40,12 +40,18 @@ struct ath6kl_sdio { struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; struct ath6kl *ar; + u8 *dma_buffer; + /* protects access to dma_buffer */ + struct mutex dma_buffer_mutex; + /* scatter request list head */ struct list_head scat_req; spinlock_t scat_lock; + bool scatter_enabled; + bool is_disabled; atomic_t irq_handling; const struct sdio_device_id *id; @@ -135,6 +141,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, { int ret = 0; + sdio_claim_host(func); + if (request & HIF_WRITE) { /* FIXME: looks like ugly workaround for something */ if (addr >= HIF_MBOX_BASE_ADDR && @@ -156,6 +164,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, ret = sdio_memcpy_fromio(func, buf, addr, len); } + sdio_release_host(func); + ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", request & 
HIF_WRITE ? "wr" : "rd", addr, request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); @@ -167,12 +177,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) { struct bus_request *bus_req; - unsigned long flag; - spin_lock_irqsave(&ar_sdio->lock, flag); + spin_lock_bh(&ar_sdio->lock); if (list_empty(&ar_sdio->bus_req_freeq)) { - spin_unlock_irqrestore(&ar_sdio->lock, flag); + spin_unlock_bh(&ar_sdio->lock); return NULL; } @@ -180,7 +189,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) struct bus_request, list); list_del(&bus_req->list); - spin_unlock_irqrestore(&ar_sdio->lock, flag); + spin_unlock_bh(&ar_sdio->lock); ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", __func__, bus_req); @@ -190,14 +199,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, struct bus_request *bus_req) { - unsigned long flag; - ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", __func__, bus_req); - spin_lock_irqsave(&ar_sdio->lock, flag); + spin_lock_bh(&ar_sdio->lock); list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); - spin_unlock_irqrestore(&ar_sdio->lock, flag); + spin_unlock_bh(&ar_sdio->lock); } static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, @@ -291,10 +298,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, mmc_req.cmd = &cmd; mmc_req.data = &data; + sdio_claim_host(ar_sdio->func); + mmc_set_data_timeout(&data, ar_sdio->func->card); /* synchronous call to process request */ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); + sdio_release_host(ar_sdio->func); + status = cmd.error ? 
cmd.error : data.error; scat_complete: @@ -389,17 +400,19 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, if (buf_needs_bounce(buf)) { if (!ar_sdio->dma_buffer) return -ENOMEM; + mutex_lock(&ar_sdio->dma_buffer_mutex); tbuf = ar_sdio->dma_buffer; memcpy(tbuf, buf, len); bounced = true; } else tbuf = buf; - sdio_claim_host(ar_sdio->func); ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); if ((request & HIF_READ) && bounced) memcpy(buf, tbuf, len); - sdio_release_host(ar_sdio->func); + + if (bounced) + mutex_unlock(&ar_sdio->dma_buffer_mutex); return ret; } @@ -418,29 +431,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, req->request); context = req->packet; ath6kl_sdio_free_bus_req(ar_sdio, req); - ath6kldev_rw_comp_handler(context, status); + ath6kl_hif_rw_comp_handler(context, status); } } static void ath6kl_sdio_write_async_work(struct work_struct *work) { struct ath6kl_sdio *ar_sdio; - unsigned long flags; struct bus_request *req, *tmp_req; ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); - sdio_claim_host(ar_sdio->func); - spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); + spin_lock_bh(&ar_sdio->wr_async_lock); list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { list_del(&req->list); - spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); + spin_unlock_bh(&ar_sdio->wr_async_lock); __ath6kl_sdio_write_async(ar_sdio, req); - spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); + spin_lock_bh(&ar_sdio->wr_async_lock); } - spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); - - sdio_release_host(ar_sdio->func); + spin_unlock_bh(&ar_sdio->wr_async_lock); } static void ath6kl_sdio_irq_handler(struct sdio_func *func) @@ -459,20 +468,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func) */ sdio_release_host(ar_sdio->func); - status = ath6kldev_intr_bh_handler(ar_sdio->ar); + status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); sdio_claim_host(ar_sdio->func); atomic_set(&ar_sdio->irq_handling, 0); WARN_ON(status && status != -ECANCELED); } -static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio) +static int ath6kl_sdio_power_on(struct ath6kl *ar) { + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct sdio_func *func = ar_sdio->func; int ret = 0; if (!ar_sdio->is_disabled) return 0; + ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); + sdio_claim_host(func); ret = sdio_enable_func(func); @@ -495,13 +507,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio) return ret; } -static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio) +static int ath6kl_sdio_power_off(struct ath6kl *ar) { + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); int ret; if (ar_sdio->is_disabled) return 0; + ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); + /* Disable the card */ sdio_claim_host(ar_sdio->func); ret = sdio_disable_func(ar_sdio->func); @@ -521,7 +536,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct bus_request *bus_req; - unsigned long flags; bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); @@ -534,9 +548,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, bus_req->request = request; bus_req->packet = packet; - spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); + spin_lock_bh(&ar_sdio->wr_async_lock); list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); - spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); + 
spin_unlock_bh(&ar_sdio->wr_async_lock); queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); return 0; @@ -582,9 +596,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct hif_scatter_req *node = NULL; - unsigned long flag; - spin_lock_irqsave(&ar_sdio->scat_lock, flag); + spin_lock_bh(&ar_sdio->scat_lock); if (!list_empty(&ar_sdio->scat_req)) { node = list_first_entry(&ar_sdio->scat_req, @@ -592,7 +605,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) list_del(&node->list); } - spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); + spin_unlock_bh(&ar_sdio->scat_lock); return node; } @@ -601,13 +614,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, struct hif_scatter_req *s_req) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); - unsigned long flag; - spin_lock_irqsave(&ar_sdio->scat_lock, flag); + spin_lock_bh(&ar_sdio->scat_lock); list_add_tail(&s_req->list, &ar_sdio->scat_req); - spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); + spin_unlock_bh(&ar_sdio->scat_lock); } @@ -618,7 +630,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); u32 request = scat_req->req; int status = 0; - unsigned long flags; if (!scat_req->len) return -EINVAL; @@ -627,14 +638,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, "hif-scatter: total len: %d scatter entries: %d\n", scat_req->len, scat_req->scat_entries); - if (request & HIF_SYNCHRONOUS) { - sdio_claim_host(ar_sdio->func); + if (request & HIF_SYNCHRONOUS) status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); - sdio_release_host(ar_sdio->func); - } else { - spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); + else { + spin_lock_bh(&ar_sdio->wr_async_lock); list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); - spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); + spin_unlock_bh(&ar_sdio->wr_async_lock); queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); } @@ -646,23 +655,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct hif_scatter_req *s_req, *tmp_req; - unsigned long flag; /* empty the free list */ - spin_lock_irqsave(&ar_sdio->scat_lock, flag); + spin_lock_bh(&ar_sdio->scat_lock); list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { list_del(&s_req->list); - spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); + spin_unlock_bh(&ar_sdio->scat_lock); + /* + * FIXME: should we also call completion handler with + * ath6kl_hif_rw_comp_handler() with status -ECANCELED so + * that the packet is properly freed? 
+ */ if (s_req->busrequest) ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); kfree(s_req->virt_dma_buf); kfree(s_req->sgentries); kfree(s_req); - spin_lock_irqsave(&ar_sdio->scat_lock, flag); + spin_lock_bh(&ar_sdio->scat_lock); } - spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); + spin_unlock_bh(&ar_sdio->scat_lock); } /* setup of HIF scatter resources */ @@ -673,6 +686,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) int ret; bool virt_scat = false; + if (ar_sdio->scatter_enabled) + return 0; + + ar_sdio->scatter_enabled = true; + /* check if host supports scatter and it meets our requirements */ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { ath6kl_err("host only supports scatter of :%d entries, need: %d\n", @@ -687,8 +705,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) MAX_SCATTER_REQUESTS, virt_scat); if (!ret) { - ath6kl_dbg(ATH6KL_DBG_SCATTER, - "hif-scatter enabled: max scatter req : %d entries: %d\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, + "hif-scatter enabled requests %d entries %d\n", MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ); @@ -712,8 +730,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) return ret; } - ath6kl_dbg(ATH6KL_DBG_SCATTER, - "Vitual scatter enabled, max_scat_req:%d, entries:%d\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, + "virtual scatter enabled requests %d entries %d\n", ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; @@ -724,7 +742,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) return 0; } -static int ath6kl_sdio_suspend(struct ath6kl *ar) +static int ath6kl_sdio_config(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + + if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= + MANUFACTURER_ID_AR6003_BASE) { + /* enable 4-bit ASYNC interrupt on AR6003 or later */ + ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG, + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); + if (ret) { + ath6kl_err("Failed to enable 4-bit async irq mode %d\n", + ret); + goto out; + } + + ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); + } + + /* give us some time to enable, in ms */ + func->enable_timeout = 100; + + ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); + if (ret) { + ath6kl_err("Set sdio block size %d failed: %d)\n", + HIF_MBOX_BLOCK_SIZE, ret); + sdio_release_host(func); + goto out; + } + +out: + sdio_release_host(func); + + return ret; +} + +static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct sdio_func *func = ar_sdio->func; @@ -733,12 +791,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar) flags = sdio_get_host_pm_caps(func); - if (!(flags & MMC_PM_KEEP_POWER)) - /* as host doesn't support keep power we need to bail out */ - ath6kl_dbg(ATH6KL_DBG_SDIO, - "func %d doesn't support MMC_PM_KEEP_POWER\n", - func->num); - return -EINVAL; + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); + + if (!(flags & MMC_PM_KEEP_POWER) || + (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) { + /* as host doesn't support keep power we need to cut power */ + return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, + NULL); + } ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) { @@ -747,11 +807,367 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar) return ret; } - 
ath6kl_deep_sleep_enable(ar); + if (!(flags & MMC_PM_WAKE_SDIO_IRQ)) + goto deepsleep; + + /* sdio irq wakes up host */ + + if (ar->state == ATH6KL_STATE_SCHED_SCAN) { + ret = ath6kl_cfg80211_suspend(ar, + ATH6KL_CFG_SUSPEND_SCHED_SCAN, + NULL); + if (ret) { + ath6kl_warn("Schedule scan suspend failed: %d", ret); + return ret; + } + + ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); + if (ret) + ath6kl_warn("set sdio wake irq flag failed: %d\n", ret); + + return ret; + } + + if (wow) { + /* + * The host sdio controller is capable of keep power and + * sdio irq wake up at this point. It's fine to continue + * wow suspend operation. + */ + ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); + if (ret) + return ret; + + ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); + if (ret) + ath6kl_err("set sdio wake irq flag failed: %d\n", ret); + + return ret; + } + +deepsleep: + return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL); +} + +static int ath6kl_sdio_resume(struct ath6kl *ar) +{ + switch (ar->state) { + case ATH6KL_STATE_OFF: + case ATH6KL_STATE_CUTPOWER: + ath6kl_dbg(ATH6KL_DBG_SUSPEND, + "sdio resume configuring sdio\n"); + + /* need to set sdio settings after power is cut from sdio */ + ath6kl_sdio_config(ar); + break; + + case ATH6KL_STATE_ON: + break; + + case ATH6KL_STATE_DEEPSLEEP: + break; + + case ATH6KL_STATE_WOW: + break; + case ATH6KL_STATE_SCHED_SCAN: + break; + } + + ath6kl_cfg80211_resume(ar); + + return 0; +} + +/* set the window address register (using 4-byte register access ). */ +static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) +{ + int status; + u8 addr_val[4]; + s32 i; + + /* + * Write bytes 1,2,3 of the register to set the upper address bytes, + * the LSB is written last to initiate the access cycle + */ + + for (i = 1; i <= 3; i++) { + /* + * Fill the buffer with the address byte value we want to + * hit 4 times. + */ + memset(addr_val, ((u8 *)&addr)[i], 4); + + /* + * Hit each byte of the register address with a 4-byte + * write operation to the same address, this is a harmless + * operation. + */ + status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, + 4, HIF_WR_SYNC_BYTE_FIX); + if (status) + break; + } + + if (status) { + ath6kl_err("%s: failed to write initial bytes of 0x%x " + "to window reg: 0x%X\n", __func__, + addr, reg_addr); + return status; + } + + /* + * Write the address register again, this time write the whole + * 4-byte value. 
The effect here is that the LSB write causes the + * cycle to start, the extra 3 byte write to bytes 1,2,3 has no + * effect since we are writing the same values again + */ + status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), + 4, HIF_WR_SYNC_BYTE_INC); + + if (status) { + ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n", + __func__, addr, reg_addr); + return status; + } + + return 0; +} + +static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) +{ + int status; + + /* set window register to start read cycle */ + status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, + address); + + if (status) + return status; + + /* read the data */ + status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, + (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); + if (status) { + ath6kl_err("%s: failed to read from window data addr\n", + __func__); + return status; + } + + return status; +} + +static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, + __le32 data) +{ + int status; + u32 val = (__force u32) data; + + /* set write data */ + status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, + (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC); + if (status) { + ath6kl_err("%s: failed to write 0x%x to window data addr\n", + __func__, data); + return status; + } + + /* set window register, which starts the write cycle */ + return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, + address); +} + +static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) +{ + u32 addr; + unsigned long timeout; + int ret; + + ar->bmi.cmd_credits = 0; + + /* Read the counter register to get the command credits */ + addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; + + timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); + while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { + + /* + * Hit the credit counter with a 4-byte access, the first byte + * read will hit the counter and cause a decrement, while the + * remaining 3 bytes has no effect. The rationale behind this + * is to make all HIF accesses 4-byte aligned. + */ + ret = ath6kl_sdio_read_write_sync(ar, addr, + (u8 *)&ar->bmi.cmd_credits, 4, + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("Unable to decrement the command credit " + "count register: %d\n", ret); + return ret; + } + + /* The counter is only 8 bits. 
+ * Ignore anything in the upper 3 bytes + */ + ar->bmi.cmd_credits &= 0xFF; + } + + if (!ar->bmi.cmd_credits) { + ath6kl_err("bmi communication timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) +{ + unsigned long timeout; + u32 rx_word = 0; + int ret = 0; + + timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); + while ((time_before(jiffies, timeout)) && !rx_word) { + ret = ath6kl_sdio_read_write_sync(ar, + RX_LOOKAHEAD_VALID_ADDRESS, + (u8 *)&rx_word, sizeof(rx_word), + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); + return ret; + } + + /* all we really want is one bit */ + rx_word &= (1 << ENDPOINT1); + } + + if (!rx_word) { + ath6kl_err("bmi_recv_buf FIFO empty\n"); + return -EINVAL; + } + + return ret; +} + +static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) +{ + int ret; + u32 addr; + + ret = ath6kl_sdio_bmi_credits(ar); + if (ret) + return ret; + + addr = ar->mbox_info.htc_addr; + + ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, + HIF_WR_SYNC_BYTE_INC); + if (ret) + ath6kl_err("unable to send the bmi data to the device\n"); + + return ret; +} + +static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) +{ + int ret; + u32 addr; + + /* + * During normal bootup, small reads may be required. + * Rather than issue an HIF Read and then wait as the Target + * adds successive bytes to the FIFO, we wait here until + * we know that response data is available. + * + * This allows us to cleanly timeout on an unexpected + * Target failure rather than risk problems at the HIF level. + * In particular, this avoids SDIO timeouts and possibly garbage + * data on some host controllers. And on an interconnect + * such as Compact Flash (as well as some SDIO masters) which + * does not provide any indication on data timeout, it avoids + * a potential hang or garbage response. + * + * Synchronization is more difficult for reads larger than the + * size of the MBOX FIFO (128B), because the Target is unable + * to push the 129th byte of data until AFTER the Host posts an + * HIF Read and removes some FIFO data. So for large reads the + * Host proceeds to post an HIF Read BEFORE all the data is + * actually available to read. Fortunately, large BMI reads do + * not occur in practice -- they're supported for debug/development. + * + * So Host/Target BMI synchronization is divided into these cases: + * CASE 1: length < 4 + * Should not happen + * + * CASE 2: 4 <= length <= 128 + * Wait for first 4 bytes to be in FIFO + * If CONSERVATIVE_BMI_READ is enabled, also wait for + * a BMI command credit, which indicates that the ENTIRE + * response is available in the the FIFO + * + * CASE 3: length > 128 + * Wait for the first 4 bytes to be in FIFO + * + * For most uses, a small timeout should be sufficient and we will + * usually see a response quickly; but there may be some unusual + * (debug) cases of BMI_EXECUTE where we want an larger timeout. + * For now, we use an unbounded busy loop while waiting for + * BMI_EXECUTE. + * + * If BMI_EXECUTE ever needs to support longer-latency execution, + * especially in production, this code needs to be enhanced to sleep + * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently + * a function of Host processor speed. 
+ */ + if (len >= 4) { /* NB: Currently, always true */ + ret = ath6kl_bmi_get_rx_lkahd(ar); + if (ret) + return ret; + } + + addr = ar->mbox_info.htc_addr; + ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("Unable to read the bmi data from the device: %d\n", + ret); + return ret; + } return 0; } +static void ath6kl_sdio_stop(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct bus_request *req, *tmp_req; + void *context; + + /* FIXME: make sure that wq is not queued again */ + + cancel_work_sync(&ar_sdio->wr_async_work); + + spin_lock_bh(&ar_sdio->wr_async_lock); + + list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { + list_del(&req->list); + + if (req->scat_req) { + /* this is a scatter gather request */ + req->scat_req->status = -ECANCELED; + req->scat_req->complete(ar_sdio->ar->htc_target, + req->scat_req); + } else { + context = req->packet; + ath6kl_sdio_free_bus_req(ar_sdio, req); + ath6kl_hif_rw_comp_handler(context, -ECANCELED); + } + } + + spin_unlock_bh(&ar_sdio->wr_async_lock); + + WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); +} + static const struct ath6kl_hif_ops ath6kl_sdio_ops = { .read_write_sync = ath6kl_sdio_read_write_sync, .write_async = ath6kl_sdio_write_async, @@ -763,8 +1179,47 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = { .scat_req_rw = ath6kl_sdio_async_rw_scatter, .cleanup_scatter = ath6kl_sdio_cleanup_scatter, .suspend = ath6kl_sdio_suspend, + .resume = ath6kl_sdio_resume, + .diag_read32 = ath6kl_sdio_diag_read32, + .diag_write32 = ath6kl_sdio_diag_write32, + .bmi_read = ath6kl_sdio_bmi_read, + .bmi_write = ath6kl_sdio_bmi_write, + .power_on = ath6kl_sdio_power_on, + .power_off = ath6kl_sdio_power_off, + .stop = ath6kl_sdio_stop, }; +#ifdef CONFIG_PM_SLEEP + +/* + * Empty handlers so that mmc subsystem doesn't remove us entirely during + * suspend. We instead follow cfg80211 suspend/resume handlers. 
+ */ +static int ath6kl_sdio_pm_suspend(struct device *device) +{ + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); + + return 0; +} + +static int ath6kl_sdio_pm_resume(struct device *device) +{ + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, + ath6kl_sdio_pm_resume); + +#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) + +#else + +#define ATH6KL_SDIO_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + static int ath6kl_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { @@ -773,8 +1228,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func, struct ath6kl *ar; int count; - ath6kl_dbg(ATH6KL_DBG_SDIO, - "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, + "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", func->num, func->vendor, func->device, func->max_blksize, func->cur_blksize); @@ -797,6 +1252,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func, spin_lock_init(&ar_sdio->lock); spin_lock_init(&ar_sdio->scat_lock); spin_lock_init(&ar_sdio->wr_async_lock); + mutex_init(&ar_sdio->dma_buffer_mutex); INIT_LIST_HEAD(&ar_sdio->scat_req); INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); @@ -815,62 +1271,29 @@ static int ath6kl_sdio_probe(struct sdio_func *func, } ar_sdio->ar = ar; + ar->hif_type = ATH6KL_HIF_TYPE_SDIO; ar->hif_priv = ar_sdio; ar->hif_ops = &ath6kl_sdio_ops; + ar->bmi.max_data_size = 256; ath6kl_sdio_set_mbox_info(ar); - sdio_claim_host(func); - - if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= - MANUFACTURER_ID_AR6003_BASE) { - /* enable 4-bit ASYNC interrupt on AR6003 or later */ - ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, - CCCR_SDIO_IRQ_MODE_REG, - SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); - if (ret) { - ath6kl_err("Failed to enable 4-bit async irq mode %d\n", - ret); - sdio_release_host(func); - goto err_cfg80211; - } - - ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n"); - } - - /* give us some time to enable, in ms */ - func->enable_timeout = 100; - - sdio_release_host(func); - - ret = ath6kl_sdio_power_on(ar_sdio); - if (ret) - goto err_cfg80211; - - sdio_claim_host(func); - - ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); + ret = ath6kl_sdio_config(ar); if (ret) { - ath6kl_err("Set sdio block size %d failed: %d)\n", - HIF_MBOX_BLOCK_SIZE, ret); - sdio_release_host(func); - goto err_off; + ath6kl_err("Failed to config sdio: %d\n", ret); + goto err_core_alloc; } - sdio_release_host(func); - ret = ath6kl_core_init(ar); if (ret) { ath6kl_err("Failed to init ath6kl core\n"); - goto err_off; + goto err_core_alloc; } return ret; -err_off: - ath6kl_sdio_power_off(ar_sdio); -err_cfg80211: - ath6kl_cfg80211_deinit(ar_sdio->ar); +err_core_alloc: + ath6kl_core_free(ar_sdio->ar); err_dma: kfree(ar_sdio->dma_buffer); err_hif: @@ -883,8 +1306,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func) { struct ath6kl_sdio *ar_sdio; - ath6kl_dbg(ATH6KL_DBG_SDIO, - "removed func %d vendor 0x%x device 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_BOOT, + "sdio removed func %d vendor 0x%x device 0x%x\n", func->num, func->vendor, func->device); ar_sdio = sdio_get_drvdata(func); @@ -892,9 +1315,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func) ath6kl_stop_txrx(ar_sdio->ar); cancel_work_sync(&ar_sdio->wr_async_work); - ath6kl_unavail_ev(ar_sdio->ar); - - ath6kl_sdio_power_off(ar_sdio); + ath6kl_core_cleanup(ar_sdio->ar); kfree(ar_sdio->dma_buffer); kfree(ar_sdio); @@ -903,16 +1324,19 @@ static void 
ath6kl_sdio_remove(struct sdio_func *func) static const struct sdio_device_id ath6kl_sdio_devices[] = { {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, {}, }; MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); static struct sdio_driver ath6kl_sdio_driver = { - .name = "ath6kl_sdio", + .name = "ath6kl", .id_table = ath6kl_sdio_devices, .probe = ath6kl_sdio_probe, .remove = ath6kl_sdio_remove, + .drv.pm = ATH6KL_SDIO_PM_OPS, }; static int __init ath6kl_sdio_init(void) @@ -938,13 +1362,19 @@ MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(AR6003_REV2_OTP_FILE); -MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE); -MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE); -MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6003_REV3_OTP_FILE); -MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE); -MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE); -MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h index c9a76051f04..108a723a108 100644 --- a/drivers/net/wireless/ath/ath6kl/target.h +++ b/drivers/net/wireless/ath/ath6kl/target.h @@ -20,7 +20,7 @@ #define AR6003_BOARD_DATA_SZ 1024 #define AR6003_BOARD_EXT_DATA_SZ 768 -#define AR6004_BOARD_DATA_SZ 7168 +#define AR6004_BOARD_DATA_SZ 6144 #define AR6004_BOARD_EXT_DATA_SZ 0 #define RESET_CONTROL_ADDRESS 0x00000000 @@ -320,7 +320,10 @@ struct host_interest { | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |------------------------------------------------------------------------------| */ +#define HI_OPTION_FW_MODE_BITS 0x2 #define HI_OPTION_FW_MODE_SHIFT 0xC + +#define HI_OPTION_FW_SUBMODE_BITS 0x2 #define HI_OPTION_FW_SUBMODE_SHIFT 0x14 /* Convert a Target virtual address into a Target physical address */ @@ -331,20 +334,6 @@ struct host_interest { (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \ (((target_type) == TARGET_TYPE_AR6004) ? 
AR6004_VTOP(vaddr) : 0)) -#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180 -#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500 -#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884 -#define AR6003_REV2_RAM_RESERVE_SIZE 6912 - -#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000 -#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330 -#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74 -#define AR6003_REV3_RAM_RESERVE_SIZE 512 - -#define AR6004_REV1_BOARD_DATA_ADDRESS 0x435400 -#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS 0x437000 -#define AR6004_REV1_RAM_RESERVE_SIZE 11264 - #define ATH6KL_FWLOG_PAYLOAD_SIZE 1500 struct ath6kl_dbglog_buf { diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index a7117074f81..506a3031a88 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, return ar->node_map[ep_map].ep_id; } -static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, +static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, bool *more_data) { struct ethhdr *datap = (struct ethhdr *) skb->data; struct ath6kl_sta *conn = NULL; bool ps_queued = false, is_psq_empty = false; + struct ath6kl *ar = vif->ar; if (is_multicast_ether_addr(datap->h_dest)) { u8 ctr = 0; @@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, * If this transmit is not because of a Dtim Expiry * q it. */ - if (!test_bit(DTIM_EXPIRED, &ar->flag)) { + if (!test_bit(DTIM_EXPIRED, &vif->flags)) { bool is_mcastq_empty = false; spin_lock_bh(&ar->mcastpsq_lock); @@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, */ if (is_mcastq_empty) ath6kl_wmi_set_pvb_cmd(ar->wmi, + vif->fw_vif_idx, MCAST_AID, 1); ps_queued = true; @@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, } } } else { - conn = ath6kl_find_sta(ar, datap->h_dest); + conn = ath6kl_find_sta(vif, datap->h_dest); if (!conn) { dev_kfree_skb(skb); @@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, */ if (is_psq_empty) ath6kl_wmi_set_pvb_cmd(ar->wmi, + vif->fw_vif_idx, conn->aid, 1); ps_queued = true; @@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_cookie *cookie = NULL; enum htc_endpoint_id eid = ENDPOINT_UNUSED; + struct ath6kl_vif *vif = netdev_priv(dev); u32 map_no = 0; u16 htc_tag = ATH6KL_DATA_PKT_TAG; u8 ac = 99 ; /* initialize to unmapped ac */ @@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) skb, skb->data, skb->len); /* If target is not associated */ - if (!test_bit(CONNECTED, &ar->flag)) { + if (!test_bit(CONNECTED, &vif->flags)) { dev_kfree_skb(skb); return 0; } @@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) goto fail_tx; /* AP mode Power saving processing */ - if (ar->nw_type == AP_NETWORK) { - if (ath6kl_powersave_ap(ar, skb, &more_data)) + if (vif->nw_type == AP_NETWORK) { + if (ath6kl_powersave_ap(vif, skb, &more_data)) return 0; } if (test_bit(WMI_ENABLED, &ar->flag)) { if (skb_headroom(skb) < dev->needed_headroom) { - WARN_ON(1); - goto fail_tx; + struct sk_buff *tmp_skb = skb; + + skb = skb_realloc_headroom(skb, dev->needed_headroom); + kfree_skb(tmp_skb); + if (skb == NULL) { + vif->net_stats.tx_dropped++; + return 0; + } } if 
(ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) { @@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) } if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE, - more_data, 0, 0, NULL)) { + more_data, 0, 0, NULL, + vif->fw_vif_idx)) { ath6kl_err("wmi_data_hdr_add failed\n"); goto fail_tx; } - if ((ar->nw_type == ADHOC_NETWORK) && - ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag)) + if ((vif->nw_type == ADHOC_NETWORK) && + ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) chk_adhoc_ps_mapping = true; else { /* get the stream mapping */ - ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb, - 0, test_bit(WMM_ENABLED, &ar->flag), &ac); + ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, + vif->fw_vif_idx, skb, + 0, test_bit(WMM_ENABLED, &vif->flags), &ac); if (ret) goto fail_tx; } @@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) fail_tx: dev_kfree_skb(skb); - ar->net_stats.tx_dropped++; - ar->net_stats.tx_aborted_errors++; + vif->net_stats.tx_dropped++; + vif->net_stats.tx_aborted_errors++; return 0; } @@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, struct htc_packet *packet) { struct ath6kl *ar = target->dev->ar; + struct ath6kl_vif *vif; enum htc_endpoint_id endpoint = packet->endpoint; + enum htc_send_full_action action = HTC_SEND_FULL_KEEP; if (endpoint == ar->ctrl_ep) { /* @@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, set_bit(WMI_CTRL_EP_FULL, &ar->flag); spin_unlock_bh(&ar->lock); ath6kl_err("wmi ctrl ep is full\n"); - return HTC_SEND_FULL_KEEP; + return action; } if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG) - return HTC_SEND_FULL_KEEP; - - if (ar->nw_type == ADHOC_NETWORK) - /* - * In adhoc mode, we cannot differentiate traffic - * priorities so there is no need to continue, however we - * should stop the network. - */ - goto stop_net_queues; + return action; /* * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for @@ -464,24 +470,36 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, * Give preference to the highest priority stream by * dropping the packets which overflowed. 
*/ - return HTC_SEND_FULL_DROP; + action = HTC_SEND_FULL_DROP; -stop_net_queues: - spin_lock_bh(&ar->lock); - set_bit(NETQ_STOPPED, &ar->flag); - spin_unlock_bh(&ar->lock); - netif_stop_queue(ar->net_dev); + /* FIXME: Locking */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (vif->nw_type == ADHOC_NETWORK || + action != HTC_SEND_FULL_DROP) { + spin_unlock_bh(&ar->list_lock); + + spin_lock_bh(&vif->if_lock); + set_bit(NETQ_STOPPED, &vif->flags); + spin_unlock_bh(&vif->if_lock); + netif_stop_queue(vif->ndev); - return HTC_SEND_FULL_KEEP; + return action; + } + } + spin_unlock_bh(&ar->list_lock); + + return action; } /* TODO this needs to be looked at */ -static void ath6kl_tx_clear_node_map(struct ath6kl *ar, +static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif, enum htc_endpoint_id eid, u32 map_no) { + struct ath6kl *ar = vif->ar; u32 i; - if (ar->nw_type != ADHOC_NETWORK) + if (vif->nw_type != ADHOC_NETWORK) return; if (!ar->ibss_ps_enable) @@ -523,7 +541,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) int status; enum htc_endpoint_id eid; bool wake_event = false; - bool flushing = false; + bool flushing[ATH6KL_VIF_MAX] = {false}; + u8 if_idx; + struct ath6kl_vif *vif; skb_queue_head_init(&skb_queue); @@ -549,8 +569,6 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) if (!skb || !skb->data) goto fatal; - packet->buf = skb->data; - __skb_queue_tail(&skb_queue, skb); if (!status && (packet->act_len != skb->len)) @@ -569,15 +587,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) wake_event = true; } + if (eid == ar->ctrl_ep) { + if_idx = wmi_cmd_hdr_get_if_idx( + (struct wmi_cmd_hdr *) packet->buf); + } else { + if_idx = wmi_data_hdr_get_if_idx( + (struct wmi_data_hdr *) packet->buf); + } + + vif = ath6kl_get_vif_by_index(ar, if_idx); + if (!vif) { + ath6kl_free_cookie(ar, ath6kl_cookie); + continue; + } + if (status) { if (status == -ECANCELED) /* a packet was flushed */ - flushing = true; + flushing[if_idx] = true; + + vif->net_stats.tx_errors++; - ar->net_stats.tx_errors++; + if (status != -ENOSPC && status != -ECANCELED) + ath6kl_warn("tx complete error: %d\n", status); - if (status != -ENOSPC) - ath6kl_err("tx error, status: 0x%x\n", status); ath6kl_dbg(ATH6KL_DBG_WLAN_TX, "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n", __func__, skb, packet->buf, packet->act_len, @@ -588,27 +621,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) __func__, skb, packet->buf, packet->act_len, eid, "OK"); - flushing = false; - ar->net_stats.tx_packets++; - ar->net_stats.tx_bytes += skb->len; + flushing[if_idx] = false; + vif->net_stats.tx_packets++; + vif->net_stats.tx_bytes += skb->len; } - ath6kl_tx_clear_node_map(ar, eid, map_no); + ath6kl_tx_clear_node_map(vif, eid, map_no); ath6kl_free_cookie(ar, ath6kl_cookie); - if (test_bit(NETQ_STOPPED, &ar->flag)) - clear_bit(NETQ_STOPPED, &ar->flag); + if (test_bit(NETQ_STOPPED, &vif->flags)) + clear_bit(NETQ_STOPPED, &vif->flags); } spin_unlock_bh(&ar->lock); __skb_queue_purge(&skb_queue); - if (test_bit(CONNECTED, &ar->flag)) { - if (!flushing) - netif_wake_queue(ar->net_dev); + /* FIXME: Locking */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (test_bit(CONNECTED, &vif->flags) && + !flushing[vif->fw_vif_idx]) { + spin_unlock_bh(&ar->list_lock); + netif_wake_queue(vif->ndev); + spin_lock_bh(&ar->list_lock); + } } + spin_unlock_bh(&ar->list_lock); if (wake_event) 
wake_up(&ar->event_wq); @@ -1041,8 +1081,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) struct ath6kl_sta *conn = NULL; struct sk_buff *skb1 = NULL; struct ethhdr *datap = NULL; + struct ath6kl_vif *vif; u16 seq_no, offset; - u8 tid; + u8 tid, if_idx; ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d", @@ -1050,7 +1091,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) packet->act_len, status); if (status || !(skb->data + HTC_HDR_LENGTH)) { - ar->net_stats.rx_errors++; + dev_kfree_skb(skb); + return; + } + + skb_put(skb, packet->act_len + HTC_HDR_LENGTH); + skb_pull(skb, HTC_HDR_LENGTH); + + if (ept == ar->ctrl_ep) { + if_idx = + wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data); + } else { + if_idx = + wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data); + } + + vif = ath6kl_get_vif_by_index(ar, if_idx); + if (!vif) { dev_kfree_skb(skb); return; } @@ -1059,28 +1116,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) * Take lock to protect buffer counts and adaptive power throughput * state. */ - spin_lock_bh(&ar->lock); + spin_lock_bh(&vif->if_lock); - ar->net_stats.rx_packets++; - ar->net_stats.rx_bytes += packet->act_len; + vif->net_stats.rx_packets++; + vif->net_stats.rx_bytes += packet->act_len; - spin_unlock_bh(&ar->lock); + spin_unlock_bh(&vif->if_lock); - skb_put(skb, packet->act_len + HTC_HDR_LENGTH); - skb_pull(skb, HTC_HDR_LENGTH); ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ", skb->data, skb->len); - skb->dev = ar->net_dev; + skb->dev = vif->ndev; if (!test_bit(WMI_ENABLED, &ar->flag)) { if (EPPING_ALIGNMENT_PAD > 0) skb_pull(skb, EPPING_ALIGNMENT_PAD); - ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); + ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); return; } + ath6kl_check_wow_status(ar); + if (ept == ar->ctrl_ep) { ath6kl_wmi_control_rx(ar->wmi, skb); return; @@ -1096,18 +1153,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) * that do not have LLC hdr. They are 16 bytes in size. * Allow these frames in the AP mode. 
*/ - if (ar->nw_type != AP_NETWORK && + if (vif->nw_type != AP_NETWORK && ((packet->act_len < min_hdr_len) || (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) { ath6kl_info("frame len is too short or too long\n"); - ar->net_stats.rx_errors++; - ar->net_stats.rx_length_errors++; + vif->net_stats.rx_errors++; + vif->net_stats.rx_length_errors++; dev_kfree_skb(skb); return; } /* Get the Power save state of the STA */ - if (ar->nw_type == AP_NETWORK) { + if (vif->nw_type == AP_NETWORK) { meta_type = wmi_data_hdr_get_meta(dhdr); ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) & @@ -1129,7 +1186,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) } datap = (struct ethhdr *) (skb->data + offset); - conn = ath6kl_find_sta(ar, datap->h_source); + conn = ath6kl_find_sta(vif, datap->h_source); if (!conn) { dev_kfree_skb(skb); @@ -1160,12 +1217,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) while ((skbuff = skb_dequeue(&conn->psq)) != NULL) { spin_unlock_bh(&conn->psq_lock); - ath6kl_data_tx(skbuff, ar->net_dev); + ath6kl_data_tx(skbuff, vif->ndev); spin_lock_bh(&conn->psq_lock); } spin_unlock_bh(&conn->psq_lock); /* Clear the PVB for this STA */ - ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, + conn->aid, 0); } } @@ -1215,12 +1273,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) return; } - if (!(ar->net_dev->flags & IFF_UP)) { + if (!(vif->ndev->flags & IFF_UP)) { dev_kfree_skb(skb); return; } - if (ar->nw_type == AP_NETWORK) { + if (vif->nw_type == AP_NETWORK) { datap = (struct ethhdr *) skb->data; if (is_multicast_ether_addr(datap->h_dest)) /* @@ -1235,8 +1293,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) * frame to it on the air else send the * frame up the stack. 
*/ - struct ath6kl_sta *conn = NULL; - conn = ath6kl_find_sta(ar, datap->h_dest); + conn = ath6kl_find_sta(vif, datap->h_dest); if (conn && ar->intra_bss) { skb1 = skb; @@ -1247,18 +1304,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) } } if (skb1) - ath6kl_data_tx(skb1, ar->net_dev); + ath6kl_data_tx(skb1, vif->ndev); + + if (skb == NULL) { + /* nothing to deliver up the stack */ + return; + } } datap = (struct ethhdr *) skb->data; if (is_unicast_ether_addr(datap->h_dest) && - aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, + aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no, is_amsdu, skb)) /* aggregation code will handle the skb */ return; - ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); + ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); } static void aggr_timeout(unsigned long arg) @@ -1336,9 +1398,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid) memset(stats, 0, sizeof(struct rxtid_stats)); } -void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz) +void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no, + u8 win_sz) { - struct aggr_info *p_aggr = ar->aggr_cntxt; + struct aggr_info *p_aggr = vif->aggr_cntxt; struct rxtid *rxtid; struct rxtid_stats *stats; u16 hold_q_size; @@ -1405,9 +1468,9 @@ struct aggr_info *aggr_init(struct net_device *dev) return p_aggr; } -void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid) +void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid) { - struct aggr_info *p_aggr = ar->aggr_cntxt; + struct aggr_info *p_aggr = vif->aggr_cntxt; struct rxtid *rxtid; if (!p_aggr) diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index a7de23cbd2c..f6f2aa27fc2 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -21,7 +21,7 @@ #include "../regd.h" #include "../regd_common.h" -static int ath6kl_wmi_sync_point(struct wmi *wmi); +static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx); static const s32 wmi_rate_tbl[][2] = { /* {W/O SGI, with SGI} */ @@ -81,6 +81,26 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi) return wmi->ep_id; } +struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx) +{ + struct ath6kl_vif *vif, *found = NULL; + + if (WARN_ON(if_idx > (ar->vif_max - 1))) + return NULL; + + /* FIXME: Locking */ + spin_lock_bh(&ar->list_lock); + list_for_each_entry(vif, &ar->vif_list, list) { + if (vif->fw_vif_idx == if_idx) { + found = vif; + break; + } + } + spin_unlock_bh(&ar->list_lock); + + return found; +} + /* Performs DIX to 802.3 encapsulation for transmit packets. * Assumes the entire DIX header is contigous and that there is * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers. 
@@ -162,12 +182,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb, int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, u8 msg_type, bool more_data, enum wmi_data_hdr_data_type data_type, - u8 meta_ver, void *tx_meta_info) + u8 meta_ver, void *tx_meta_info, u8 if_idx) { struct wmi_data_hdr *data_hdr; int ret; - if (WARN_ON(skb == NULL)) + if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1))) return -EINVAL; if (tx_meta_info) { @@ -189,7 +209,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT; data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT); - data_hdr->info3 = 0; + data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK); return 0; } @@ -216,7 +236,8 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri) return ip_pri; } -int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, +int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, + struct sk_buff *skb, u32 layer2_priority, bool wmm_enabled, u8 *ac) { @@ -262,7 +283,12 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, usr_pri = layer2_priority & 0x7; } - /* workaround for WMM S5 */ + /* + * workaround for WMM S5 + * + * FIXME: wmi->traffic_class is always 100 so this test doesn't + * make sense + */ if ((wmi->traffic_class == WMM_AC_VI) && ((usr_pri == 5) || (usr_pri == 4))) usr_pri = 1; @@ -284,7 +310,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT); /* Implicit streams are created with TSID 0xFF */ cmd.tsid = WMI_IMPLICIT_PSTREAM; - ath6kl_wmi_create_pstream_cmd(wmi, &cmd); + ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd); } *ac = traffic_class; @@ -410,13 +436,14 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len) } static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap, - int len) + int len, struct ath6kl_vif *vif) { struct wmi_remain_on_chnl_event *ev; u32 freq; u32 dur; struct ieee80211_channel *chan; struct ath6kl *ar = wmi->parent_dev; + u32 id; if (len < sizeof(*ev)) return -EINVAL; @@ -426,26 +453,29 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap, dur = le32_to_cpu(ev->duration); ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n", freq, dur); - chan = ieee80211_get_channel(ar->wdev->wiphy, freq); + chan = ieee80211_get_channel(ar->wiphy, freq); if (!chan) { ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel " "(freq=%u)\n", freq); return -EINVAL; } - cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT, + id = vif->last_roc_id; + cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT, dur, GFP_ATOMIC); return 0; } static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi, - u8 *datap, int len) + u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_cancel_remain_on_chnl_event *ev; u32 freq; u32 dur; struct ieee80211_channel *chan; struct ath6kl *ar = wmi->parent_dev; + u32 id; if (len < sizeof(*ev)) return -EINVAL; @@ -455,23 +485,29 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi, dur = le32_to_cpu(ev->duration); ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u " "status=%u\n", freq, dur, ev->status); - chan = ieee80211_get_channel(ar->wdev->wiphy, freq); + chan = ieee80211_get_channel(ar->wiphy, freq); if (!chan) { ath6kl_dbg(ATH6KL_DBG_WMI, 
"cancel_remain_on_chnl: Unknown " "channel (freq=%u)\n", freq); return -EINVAL; } - cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan, + if (vif->last_cancel_roc_id && + vif->last_cancel_roc_id + 1 == vif->last_roc_id) + id = vif->last_cancel_roc_id; /* event for cancel command */ + else + id = vif->last_roc_id; /* timeout on uncanceled r-o-c */ + vif->last_cancel_roc_id = 0; + cfg80211_remain_on_channel_expired(vif->ndev, id, chan, NL80211_CHAN_NO_HT, GFP_ATOMIC); return 0; } -static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_tx_status_event *ev; u32 id; - struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(*ev)) return -EINVAL; @@ -481,7 +517,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len) ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n", id, ev->ack_status); if (wmi->last_mgmt_tx_frame) { - cfg80211_mgmt_tx_status(ar->net_dev, id, + cfg80211_mgmt_tx_status(vif->ndev, id, wmi->last_mgmt_tx_frame, wmi->last_mgmt_tx_frame_len, !!ev->ack_status, GFP_ATOMIC); @@ -493,12 +529,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_p2p_rx_probe_req_event *ev; u32 freq; u16 dlen; - struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(*ev)) return -EINVAL; @@ -513,10 +549,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len) } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u " "probe_req_report=%d\n", - dlen, freq, ar->probe_req_report); + dlen, freq, vif->probe_req_report); - if (ar->probe_req_report || ar->nw_type == AP_NETWORK) - cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC); + if (vif->probe_req_report || vif->nw_type == AP_NETWORK) + cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC); return 0; } @@ -536,12 +572,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len) return 0; } -static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_rx_action_event *ev; u32 freq; u16 dlen; - struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(*ev)) return -EINVAL; @@ -555,7 +591,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len) return -EINVAL; } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); - cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC); + cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC); return 0; } @@ -620,7 +656,8 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size) } /* Send a "simple" wmi command -- one with no arguments */ -static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id) +static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_cmd_id cmd_id) { struct sk_buff *skb; int ret; @@ -629,7 +666,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id) if (!skb) return -ENOMEM; - ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG); return ret; } @@ -641,7 +678,6 @@ static int 
ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len) if (len < sizeof(struct wmi_ready_event_2)) return -EINVAL; - wmi->ready = true; ath6kl_ready_event(wmi->parent_dev, ev->mac_addr, le32_to_cpu(ev->sw_version), le32_to_cpu(ev->abi_version)); @@ -673,32 +709,73 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi) cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR; cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS; - ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG); + ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, + NO_SYNC_WMIFLAG); return 0; } -static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len) +int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid) +{ + struct sk_buff *skb; + struct roam_ctrl_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct roam_ctrl_cmd *) skb->data; + memset(cmd, 0, sizeof(*cmd)); + + memcpy(cmd->info.bssid, bssid, ETH_ALEN); + cmd->roam_ctrl = WMI_FORCE_ROAM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid); + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode) +{ + struct sk_buff *skb; + struct roam_ctrl_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct roam_ctrl_cmd *) skb->data; + memset(cmd, 0, sizeof(*cmd)); + + cmd->info.roam_mode = mode; + cmd->roam_ctrl = WMI_SET_ROAM_MODE; + + ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode); + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, + NO_SYNC_WMIFLAG); +} + +static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_connect_event *ev; u8 *pie, *peie; - struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(struct wmi_connect_event)) return -EINVAL; ev = (struct wmi_connect_event *) datap; - if (ar->nw_type == AP_NETWORK) { + if (vif->nw_type == AP_NETWORK) { /* AP mode start/STA connected event */ - struct net_device *dev = ar->net_dev; + struct net_device *dev = vif->ndev; if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) { ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM " "(AP started)\n", __func__, le16_to_cpu(ev->u.ap_bss.ch), ev->u.ap_bss.bssid); ath6kl_connect_ap_mode_bss( - ar, le16_to_cpu(ev->u.ap_bss.ch)); + vif, le16_to_cpu(ev->u.ap_bss.ch)); } else { ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM " "auth=%u keymgmt=%u cipher=%u apsd_info=%u " @@ -710,7 +787,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len) le16_to_cpu(ev->u.ap_sta.cipher), ev->u.ap_sta.apsd_info); ath6kl_connect_ap_mode_sta( - ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr, + vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr, ev->u.ap_sta.keymgmt, le16_to_cpu(ev->u.ap_sta.cipher), ev->u.ap_sta.auth, ev->assoc_req_len, @@ -755,7 +832,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len) pie += pie[1] + 2; } - ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch), + ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch), ev->u.sta.bssid, le16_to_cpu(ev->u.sta.listen_intvl), le16_to_cpu(ev->u.sta.beacon_intvl), @@ -834,14 +911,15 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len) alpha2[0] = country->isoName[0]; alpha2[1] = country->isoName[1]; - regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2); + regulatory_hint(wmi->parent_dev->wiphy, 
alpha2); ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n", alpha2[0], alpha2[1]); } } -static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_disconnect_event *ev; wmi->traffic_class = 100; @@ -857,10 +935,8 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len) ev->disconn_reason, ev->assoc_resp_len); wmi->is_wmm_enabled = false; - wmi->pair_crypto_type = NONE_CRYPT; - wmi->grp_crypto_type = NONE_CRYPT; - ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason, + ath6kl_disconnect_event(vif, ev->disconn_reason, ev->bssid, ev->assoc_resp_len, ev->assoc_info, le16_to_cpu(ev->proto_reason_status)); @@ -886,7 +962,8 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_tkip_micerr_event *ev; @@ -895,12 +972,20 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len) ev = (struct wmi_tkip_micerr_event *) datap; - ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast); + ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast); return 0; } -static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) +void ath6kl_wmi_sscan_timer(unsigned long ptr) +{ + struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr; + + cfg80211_sched_scan_results(vif->ar->wiphy); +} + +static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_bss_info_hdr2 *bih; u8 *buf; @@ -927,26 +1012,27 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; /* Only update BSS table for now */ if (bih->frame_type == BEACON_FTYPE && - test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); - ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); + test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, + NONE_BSS_FILTER, 0); } - channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch)); + channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch)); if (channel == NULL) return -EINVAL; if (len < 8 + 2 + 2) return -EINVAL; - if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) && - memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) { + if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags) + && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) { const u8 *tim; tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2, len - 8 - 2 - 2); if (tim && tim[1] >= 2) { - ar->assoc_bss_dtim_period = tim[3]; - set_bit(DTIM_PERIOD_AVAIL, &ar->flag); + vif->assoc_bss_dtim_period = tim[3]; + set_bit(DTIM_PERIOD_AVAIL, &vif->flags); } } @@ -966,7 +1052,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) IEEE80211_STYPE_BEACON); memset(mgmt->da, 0xff, ETH_ALEN); } else { - struct net_device *dev = ar->net_dev; + struct net_device *dev = vif->ndev; mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); @@ -979,7 +1065,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) memcpy(&mgmt->u.beacon, buf, len); - bss = 
cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt, + bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt, 24 + len, (bih->snr - 95) * 100, GFP_ATOMIC); kfree(mgmt); @@ -987,6 +1073,21 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) return -ENOMEM; cfg80211_put_bss(bss); + /* + * Firmware doesn't return any event when scheduled scan has + * finished, so we need to use a timer to find out when there are + * no more results. + * + * The timer is started from the first bss info received, otherwise + * the timer would not ever fire if the scan interval is short + * enough. + */ + if (ar->state == ATH6KL_STATE_SCHED_SCAN && + !timer_pending(&vif->sched_scan_timer)) { + mod_timer(&vif->sched_scan_timer, jiffies + + msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY)); + } + return 0; } @@ -1094,20 +1195,21 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_scan_complete_event *ev; ev = (struct wmi_scan_complete_event *) datap; - ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status)); + ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status)); wmi->is_probe_ssid = false; return 0; } static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap, - int len) + int len, struct ath6kl_vif *vif) { struct wmi_neighbor_report_event *ev; u8 i; @@ -1125,7 +1227,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap, ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n", i + 1, ev->num_neighbors, ev->neighbor[i].bssid, ev->neighbor[i].bss_flags); - cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i, + cfg80211_pmksa_candidate_notify(vif->ndev, i, ev->neighbor[i].bssid, !!(ev->neighbor[i].bss_flags & WMI_PREAUTH_CAPABLE_BSS), @@ -1166,9 +1268,10 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { - ath6kl_tgt_stats_event(wmi->parent_dev, datap, len); + ath6kl_tgt_stats_event(vif, datap, len); return 0; } @@ -1222,7 +1325,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi, cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data; memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd)); - return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID, + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID, NO_SYNC_WMIFLAG); } @@ -1322,7 +1425,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap, return 0; } -static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_cac_event *reply; struct ieee80211_tspec_ie *ts; @@ -1343,7 +1447,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len) tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & IEEE80211_WMM_IE_TSPEC_TID_MASK; - ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid); + ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx, + reply->ac, tsid); } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) { /* * Following assumes that there is only one outstanding @@ -1358,7 +1463,8 @@ static 
int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len) break; } if (index < (sizeof(active_tsids) * 8)) - ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index); + ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx, + reply->ac, index); } /* @@ -1403,7 +1509,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi, cmd = (struct wmi_snr_threshold_params_cmd *) skb->data; memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd)); - return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID, + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID, NO_SYNC_WMIFLAG); } @@ -1528,14 +1634,15 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, +int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag) { struct wmi_cmd_hdr *cmd_hdr; enum htc_endpoint_id ep_id = wmi->ep_id; int ret; + u16 info1; - if (WARN_ON(skb == NULL)) + if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1)))) return -EINVAL; ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n", @@ -1554,19 +1661,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, * Make sure all data currently queued is transmitted before * the cmd execution. Establish a new sync point. */ - ath6kl_wmi_sync_point(wmi); + ath6kl_wmi_sync_point(wmi, if_idx); } skb_push(skb, sizeof(struct wmi_cmd_hdr)); cmd_hdr = (struct wmi_cmd_hdr *) skb->data; cmd_hdr->cmd_id = cpu_to_le16(cmd_id); - cmd_hdr->info1 = 0; /* added for virtual interface */ + info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK; + cmd_hdr->info1 = cpu_to_le16(info1); /* Only for OPT_TX_CMD, use BE endpoint. */ if (cmd_id == WMI_OPT_TX_FRAME_CMDID) { ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, - false, false, 0, NULL); + false, false, 0, NULL, if_idx); if (ret) { dev_kfree_skb(skb); return ret; @@ -1582,20 +1690,22 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, * Make sure all new data queued waits for the command to * execute. Establish a new sync point. 
*/ - ath6kl_wmi_sync_point(wmi); + ath6kl_wmi_sync_point(wmi, if_idx); } return 0; } -int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, +int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, + enum network_type nw_type, enum dot11_auth_mode dot11_auth_mode, enum auth_mode auth_mode, enum crypto_type pairwise_crypto, u8 pairwise_crypto_len, enum crypto_type group_crypto, u8 group_crypto_len, int ssid_len, u8 *ssid, - u8 *bssid, u16 channel, u32 ctrl_flags) + u8 *bssid, u16 channel, u32 ctrl_flags, + u8 nw_subtype) { struct sk_buff *skb; struct wmi_connect_cmd *cc; @@ -1635,19 +1745,19 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, cc->grp_crypto_len = group_crypto_len; cc->ch = cpu_to_le16(channel); cc->ctrl_flags = cpu_to_le32(ctrl_flags); + cc->nw_subtype = nw_subtype; if (bssid != NULL) memcpy(cc->bssid, bssid, ETH_ALEN); - wmi->pair_crypto_type = pairwise_crypto; - wmi->grp_crypto_type = group_crypto; - - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID, + NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel) +int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, + u16 channel) { struct sk_buff *skb; struct wmi_reconnect_cmd *cc; @@ -1668,13 +1778,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel) if (bssid != NULL) memcpy(cc->bssid, bssid, ETH_ALEN); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_disconnect_cmd(struct wmi *wmi) +int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx) { int ret; @@ -1683,12 +1793,79 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi) wmi->traffic_class = 100; /* Disconnect command does not need to do a SYNC before. 
*/ - ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID); + ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID); + + return ret; +} + +int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, + u32 force_fgscan, u32 is_legacy, + u32 home_dwell_time, u32 force_scan_interval, + s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates) +{ + struct sk_buff *skb; + struct wmi_begin_scan_cmd *sc; + s8 size; + int i, band, ret; + struct ath6kl *ar = wmi->parent_dev; + int num_rates; + + size = sizeof(struct wmi_begin_scan_cmd); + + if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN)) + return -EINVAL; + + if (num_chan > WMI_MAX_CHANNELS) + return -EINVAL; + + if (num_chan) + size += sizeof(u16) * (num_chan - 1); + + skb = ath6kl_wmi_get_new_buf(size); + if (!skb) + return -ENOMEM; + + sc = (struct wmi_begin_scan_cmd *) skb->data; + sc->scan_type = scan_type; + sc->force_fg_scan = cpu_to_le32(force_fgscan); + sc->is_legacy = cpu_to_le32(is_legacy); + sc->home_dwell_time = cpu_to_le32(home_dwell_time); + sc->force_scan_intvl = cpu_to_le32(force_scan_interval); + sc->no_cck = cpu_to_le32(no_cck); + sc->num_ch = num_chan; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband = + ar->wiphy->bands[band]; + u32 ratemask = rates[band]; + u8 *supp_rates = sc->supp_rates[band].rates; + num_rates = 0; + + for (i = 0; i < sband->n_bitrates; i++) { + if ((BIT(i) & ratemask) == 0) + continue; /* skip rate */ + supp_rates[num_rates++] = + (u8) (sband->bitrates[i].bitrate / 5); + } + sc->supp_rates[band].nrates = num_rates; + } + + for (i = 0; i < num_chan; i++) + sc->ch_list[i] = cpu_to_le16(ch_list[i]); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID, + NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, +/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use + * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P + * mgmt operations using station interface. 
+ */ +int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, u32 force_fgscan, u32 is_legacy, u32 home_dwell_time, u32 force_scan_interval, s8 num_chan, u16 *ch_list) @@ -1724,13 +1901,14 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, for (i = 0; i < num_chan; i++) sc->ch_list[i] = cpu_to_le16(ch_list[i]); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, +int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, + u16 fg_start_sec, u16 fg_end_sec, u16 bg_sec, u16 minact_chdw_msec, u16 maxact_chdw_msec, u16 pas_chdw_msec, u8 short_scan_ratio, @@ -1757,12 +1935,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time); sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask) +int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask) { struct sk_buff *skb; struct wmi_bss_filter_cmd *cmd; @@ -1779,12 +1957,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask) cmd->bss_filter = filter; cmd->ie_mask = cpu_to_le32(ie_mask); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, +int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, u8 ssid_len, u8 *ssid) { struct sk_buff *skb; @@ -1816,12 +1994,13 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, cmd->ssid_len = ssid_len; memcpy(cmd->ssid, ssid, ssid_len); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, +int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, + u16 listen_interval, u16 listen_beacons) { struct sk_buff *skb; @@ -1836,12 +2015,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, cmd->listen_intvl = cpu_to_le16(listen_interval); cmd->num_beacons = cpu_to_le16(listen_beacons); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode) +int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode) { struct sk_buff *skb; struct wmi_power_mode_cmd *cmd; @@ -1855,12 +2034,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode) cmd->pwr_mode = pwr_mode; wmi->pwr_mode = pwr_mode; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, +int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, u16 ps_poll_num, u16 dtim_policy, u16 tx_wakeup_policy, u16 num_tx_to_wakeup, u16 ps_fail_event_policy) @@ -1881,12 +2060,12 @@ int 
ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup); pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout) +int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout) { struct sk_buff *skb; struct wmi_disc_timeout_cmd *cmd; @@ -1899,15 +2078,20 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout) cmd = (struct wmi_disc_timeout_cmd *) skb->data; cmd->discon_timeout = timeout; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID, NO_SYNC_WMIFLAG); + + if (ret == 0) + ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout); + return ret; } -int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, +int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, enum crypto_type key_type, u8 key_usage, u8 key_len, - u8 *key_rsc, u8 *key_material, + u8 *key_rsc, unsigned int key_rsc_len, + u8 *key_material, u8 key_op_ctrl, u8 *mac_addr, enum wmi_sync_flag sync_flag) { @@ -1920,7 +2104,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, key_index, key_type, key_usage, key_len, key_op_ctrl); if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) || - (key_material == NULL)) + (key_material == NULL) || key_rsc_len > 8) return -EINVAL; if ((WEP_CRYPT != key_type) && (NULL == key_rsc)) @@ -1938,20 +2122,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, memcpy(cmd->key, key_material, key_len); if (key_rsc != NULL) - memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc)); + memcpy(cmd->key_rsc, key_rsc, key_rsc_len); cmd->key_op_ctrl = key_op_ctrl; if (mac_addr) memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID, sync_flag); return ret; } -int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk) +int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk) { struct sk_buff *skb; struct wmi_add_krk_cmd *cmd; @@ -1964,12 +2148,13 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk) cmd = (struct wmi_add_krk_cmd *) skb->data; memcpy(cmd->krk, krk, WMI_KRK_LEN); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID, + NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index) +int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index) { struct sk_buff *skb; struct wmi_delete_cipher_key_cmd *cmd; @@ -1985,13 +2170,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index) cmd = (struct wmi_delete_cipher_key_cmd *) skb->data; cmd->key_index = key_index; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, +int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, const u8 *pmkid, bool set) { struct sk_buff *skb; @@ -2018,14 +2203,14 @@ int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, cmd->enable = PMKID_DISABLE; } - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID, + ret = 
ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID, NO_SYNC_WMIFLAG); return ret; } static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb, - enum htc_endpoint_id ep_id) + enum htc_endpoint_id ep_id, u8 if_idx) { struct wmi_data_hdr *data_hdr; int ret; @@ -2037,14 +2222,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb, data_hdr = (struct wmi_data_hdr *) skb->data; data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT; - data_hdr->info3 = 0; + data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK); ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id); return ret; } -static int ath6kl_wmi_sync_point(struct wmi *wmi) +static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) { struct sk_buff *skb; struct wmi_sync_cmd *cmd; @@ -2100,7 +2285,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi) * Send sync cmd followed by sync data messages on all * endpoints being used */ - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID, NO_SYNC_WMIFLAG); if (ret) @@ -2119,7 +2304,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi) traffic_class); ret = ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb, - ep_id); + ep_id, if_idx); if (ret) break; @@ -2142,7 +2327,7 @@ free_skb: return ret; } -int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, +int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, struct wmi_create_pstream_cmd *params) { struct sk_buff *skb; @@ -2231,12 +2416,13 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, ath6kl_indicate_tx_activity(wmi->parent_dev, params->traffic_class, true); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid) +int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, + u8 tsid) { struct sk_buff *skb; struct wmi_delete_pstream_cmd *cmd; @@ -2272,7 +2458,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid) "sending delete_pstream_cmd: traffic class: %d tsid=%d\n", traffic_class, tsid); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID, SYNC_BEFORE_WMIFLAG); spin_lock_bh(&wmi->lock); @@ -2311,17 +2497,173 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd) cmd = (struct wmi_set_ip_cmd *) skb->data; memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd)); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID, + NO_SYNC_WMIFLAG); return ret; } -static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap, - int len) +static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi) { - if (len < sizeof(struct wmi_get_wow_list_reply)) + u16 active_tsids; + u8 stream_exist; + int i; + + /* + * Relinquish credits from all implicitly created pstreams + * since when we go to sleep. If user created explicit + * thinstreams exists with in a fatpipe leave them intact + * for the user to delete. + */ + spin_lock_bh(&wmi->lock); + stream_exist = wmi->fat_pipe_exist; + spin_unlock_bh(&wmi->lock); + + for (i = 0; i < WMM_NUM_AC; i++) { + if (stream_exist & (1 << i)) { + + /* + * FIXME: Is this lock & unlock inside + * for loop correct? may need rework. 
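ath6kl_wmi_relinquish_implicit_pstream_credits(), added in this hunk and continued just after this note, walks the per-AC fat-pipe bitmask before the host sleeps and tears down only the pipes that carry no user-created thin streams. The following userspace sketch condenses that decision; the locking, the ath6kl_indicate_tx_activity() side effect and the field names are simplified assumptions rather than the driver's code.

#include <stdio.h>
#include <stdint.h>

#define WMM_NUM_AC 4	/* BE, BK, VI, VO */

/* Before the host sleeps, close every fat pipe (one bit per AC) that only
 * carried implicitly created streams; pipes that still hold user-created
 * thin streams (a non-zero TSID mask) are left intact. */
static uint8_t relinquish_implicit_pstreams(uint8_t fat_pipe_exist,
					    const uint16_t tsids_for_ac[WMM_NUM_AC])
{
	int i;

	for (i = 0; i < WMM_NUM_AC; i++) {
		if (!(fat_pipe_exist & (1 << i)))
			continue;			/* no pipe open for this AC */

		if (!tsids_for_ac[i]) {
			fat_pipe_exist &= ~(1 << i);	/* nothing user-created: close it */
			printf("AC %d: indicate tx inactivity\n", i);
		}
	}
	return fat_pipe_exist;
}

int main(void)
{
	uint16_t tsids[WMM_NUM_AC] = { 0, 0x0004, 0, 0 };	/* only AC 1 has a user stream */
	uint8_t pipes = 0x03;					/* AC 0 and AC 1 currently open */

	printf("fat pipes left after sleep prep: 0x%x\n",
	       (unsigned) relinquish_implicit_pstreams(pipes, tsids));
	return 0;
}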
+ */ + spin_lock_bh(&wmi->lock); + active_tsids = wmi->stream_exist_for_ac[i]; + spin_unlock_bh(&wmi->lock); + + /* + * If there are no user created thin streams + * delete the fatpipe + */ + if (!active_tsids) { + stream_exist &= ~(1 << i); + /* + * Indicate inactivity to driver layer for + * this fatpipe (pstream) + */ + ath6kl_indicate_tx_activity(wmi->parent_dev, + i, false); + } + } + } + + /* FIXME: Can we do this assignment without locking ? */ + spin_lock_bh(&wmi->lock); + wmi->fat_pipe_exist = stream_exist; + spin_unlock_bh(&wmi->lock); +} + +int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_host_mode host_mode) +{ + struct sk_buff *skb; + struct wmi_set_host_sleep_mode_cmd *cmd; + int ret; + + if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) && + (host_mode != ATH6KL_HOST_MODE_AWAKE)) { + ath6kl_err("invalid host sleep mode: %d\n", host_mode); return -EINVAL; + } - return 0; + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data; + + if (host_mode == ATH6KL_HOST_MODE_ASLEEP) { + ath6kl_wmi_relinquish_implicit_pstream_credits(wmi); + cmd->asleep = cpu_to_le32(1); + } else + cmd->awake = cpu_to_le32(1); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SET_HOST_SLEEP_MODE_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_wow_mode wow_mode, + u32 filter, u16 host_req_delay) +{ + struct sk_buff *skb; + struct wmi_set_wow_mode_cmd *cmd; + int ret; + + if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) && + wow_mode != ATH6KL_WOW_MODE_DISABLE) { + ath6kl_err("invalid wow mode: %d\n", wow_mode); + return -EINVAL; + } + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_wow_mode_cmd *) skb->data; + cmd->enable_wow = cpu_to_le32(wow_mode); + cmd->filter = cpu_to_le32(filter); + cmd->host_req_delay = cpu_to_le16(host_req_delay); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID, + NO_SYNC_WMIFLAG); + return ret; +} + +int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u8 list_id, u8 filter_size, + u8 filter_offset, u8 *filter, u8 *mask) +{ + struct sk_buff *skb; + struct wmi_add_wow_pattern_cmd *cmd; + u16 size; + u8 *filter_mask; + int ret; + + /* + * Allocate additional memory in the buffer to hold + * filter and mask value, which is twice of filter_size. 
+ */ + size = sizeof(*cmd) + (2 * filter_size); + + skb = ath6kl_wmi_get_new_buf(size); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_add_wow_pattern_cmd *) skb->data; + cmd->filter_list_id = list_id; + cmd->filter_size = filter_size; + cmd->filter_offset = filter_offset; + + memcpy(cmd->filter, filter, filter_size); + + filter_mask = (u8 *) (cmd->filter + filter_size); + memcpy(filter_mask, mask, filter_size); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID, + NO_SYNC_WMIFLAG); + + return ret; +} + +int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u16 list_id, u16 filter_id) +{ + struct sk_buff *skb; + struct wmi_del_wow_pattern_cmd *cmd; + int ret; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_del_wow_pattern_cmd *) skb->data; + cmd->filter_list_id = cpu_to_le16(list_id); + cmd->filter_id = cpu_to_le16(filter_id); + + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID, + NO_SYNC_WMIFLAG); + return ret; } static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb, @@ -2336,7 +2678,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb, cmd_hdr = (struct wmix_cmd_hdr *) skb->data; cmd_hdr->cmd_id = cpu_to_le32(cmd_id); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag); + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag); return ret; } @@ -2379,12 +2721,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config) return ret; } -int ath6kl_wmi_get_stats_cmd(struct wmi *wmi) +int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx) { - return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID); + return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID); } -int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM) +int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM) { struct sk_buff *skb; struct wmi_set_tx_pwr_cmd *cmd; @@ -2397,18 +2739,24 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM) cmd = (struct wmi_set_tx_pwr_cmd *) skb->data; cmd->dbM = dbM; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi) +int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx) +{ + return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID); +} + +int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi) { - return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID); + return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID); } -int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy) +int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, + u8 preamble_policy) { struct sk_buff *skb; struct wmi_set_lpreamble_cmd *cmd; @@ -2422,7 +2770,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy) cmd->status = status; cmd->preamble_policy = preamble_policy; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID, NO_SYNC_WMIFLAG); return ret; } @@ -2440,11 +2788,12 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold) cmd = (struct wmi_set_rts_cmd *) skb->data; cmd->threshold = cpu_to_le16(threshold); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID, + NO_SYNC_WMIFLAG); return ret; 
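The WoW pattern command added just above sizes its skb as sizeof(*cmd) + 2 * filter_size so that the pattern bytes and an equal-length mask can ride after the fixed header. Below is a minimal userspace illustration of that buffer layout; struct wow_pattern_cmd, build_wow_pattern() and the sample bytes are hypothetical simplifications, and the real command is of course handed to ath6kl_wmi_cmd_send() rather than printed.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical, simplified take on wmi_add_wow_pattern_cmd: a fixed header
 * followed by filter_size pattern bytes and then filter_size mask bytes. */
struct wow_pattern_cmd {
	uint8_t filter_list_id;
	uint8_t filter_size;
	uint8_t filter_offset;
	uint8_t filter[];		/* pattern first, mask right after it */
};

static struct wow_pattern_cmd *build_wow_pattern(uint8_t list_id, uint8_t offset,
						 const uint8_t *pattern,
						 const uint8_t *mask, uint8_t size)
{
	/* Twice filter_size: pattern and mask share the trailing buffer. */
	struct wow_pattern_cmd *cmd = malloc(sizeof(*cmd) + 2 * size);

	if (!cmd)
		return NULL;

	cmd->filter_list_id = list_id;
	cmd->filter_size = size;
	cmd->filter_offset = offset;
	memcpy(cmd->filter, pattern, size);		/* pattern bytes */
	memcpy(cmd->filter + size, mask, size);		/* equal-length mask */
	return cmd;
}

int main(void)
{
	const uint8_t pattern[] = { 0x08, 0x00 };	/* e.g. an ethertype to match */
	const uint8_t mask[]    = { 0xff, 0xff };
	struct wow_pattern_cmd *cmd = build_wow_pattern(0, 12, pattern, mask, 2);

	if (cmd) {
		printf("payload bytes after the header: %d\n", 2 * cmd->filter_size);
		free(cmd);
	}
	return 0;
}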
} -int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg) +int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg) { struct sk_buff *skb; struct wmi_set_wmm_txop_cmd *cmd; @@ -2460,12 +2809,13 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg) cmd = (struct wmi_set_wmm_txop_cmd *) skb->data; cmd->txop_enable = cfg; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl) +int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, + u8 keep_alive_intvl) { struct sk_buff *skb; struct wmi_set_keepalive_cmd *cmd; @@ -2477,10 +2827,13 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl) cmd = (struct wmi_set_keepalive_cmd *) skb->data; cmd->keep_alive_intvl = keep_alive_intvl; - wmi->keep_alive_intvl = keep_alive_intvl; - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID, NO_SYNC_WMIFLAG); + + if (ret == 0) + ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl); + return ret; } @@ -2495,7 +2848,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len) memcpy(skb->data, buf, len); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG); return ret; } @@ -2528,28 +2881,31 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap, return 0; } -static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap; - aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid, + aggr_recv_addba_req_evt(vif, cmd->tid, le16_to_cpu(cmd->st_seq_no), cmd->win_sz); return 0; } -static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap; - aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid); + aggr_recv_delba_req_evt(vif, cmd->tid); return 0; } /* AP mode functions */ -int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p) +int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, + struct wmi_connect_cmd *p) { struct sk_buff *skb; struct wmi_connect_cmd *cm; @@ -2562,7 +2918,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p) cm = (struct wmi_connect_cmd *) skb->data; memcpy(cm, p, sizeof(*cm)); - res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID, + res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID, NO_SYNC_WMIFLAG); ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u " "ctrl_flags=0x%x-> res=%d\n", @@ -2571,7 +2927,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p) return res; } -int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason) +int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac, + u16 reason) { struct sk_buff *skb; struct wmi_ap_set_mlme_cmd *cm; @@ -2585,11 +2942,12 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason) cm->reason = cpu_to_le16(reason); cm->cmd = cmd; - 
return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID, + return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID, NO_SYNC_WMIFLAG); } -static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { struct wmi_pspoll_event *ev; @@ -2598,19 +2956,21 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len) ev = (struct wmi_pspoll_event *) datap; - ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid)); + ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid)); return 0; } -static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len) +static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len, + struct ath6kl_vif *vif) { - ath6kl_dtimexpiry_event(wmi->parent_dev); + ath6kl_dtimexpiry_event(vif); return 0; } -int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag) +int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, + bool flag) { struct sk_buff *skb; struct wmi_ap_set_pvb_cmd *cmd; @@ -2625,13 +2985,14 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag) cmd->rsvd = cpu_to_le16(0); cmd->flag = cpu_to_le32(flag); - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID, NO_SYNC_WMIFLAG); return 0; } -int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver, +int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, + u8 rx_meta_ver, bool rx_dot11_hdr, bool defrag_on_host) { struct sk_buff *skb; @@ -2648,14 +3009,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver, cmd->meta_ver = rx_meta_ver; /* Delete the local aggr state, on host */ - ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, - u8 ie_len) +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, + const u8 *ie, u8 ie_len) { struct sk_buff *skb; struct wmi_set_appie_cmd *p; @@ -2669,8 +3030,11 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, p = (struct wmi_set_appie_cmd *) skb->data; p->mgmt_frm_type = mgmt_frm_type; p->ie_len = ie_len; - memcpy(p->ie_info, ie, ie_len); - return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID, + + if (ie != NULL && ie_len > 0) + memcpy(p->ie_info, ie, ie_len); + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID, NO_SYNC_WMIFLAG); } @@ -2688,11 +3052,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable) cmd = (struct wmi_disable_11b_rates_cmd *) skb->data; cmd->disable = disable ? 
1 : 0; - return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID, + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur) +int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur) { struct sk_buff *skb; struct wmi_remain_on_chnl_cmd *p; @@ -2706,12 +3070,16 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur) p = (struct wmi_remain_on_chnl_cmd *) skb->data; p->freq = cpu_to_le32(freq); p->duration = cpu_to_le32(dur); - return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID, + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, - const u8 *data, u16 data_len) +/* ath6kl_wmi_send_action_cmd is to be deprecated. Use + * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P + * mgmt operations using station interface. + */ +int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, + u32 wait, const u8 *data, u16 data_len) { struct sk_buff *skb; struct wmi_send_action_cmd *p; @@ -2731,6 +3099,7 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, } kfree(wmi->last_mgmt_tx_frame); + memcpy(buf, data, data_len); wmi->last_mgmt_tx_frame = buf; wmi->last_mgmt_tx_frame_len = data_len; @@ -2742,18 +3111,61 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, p->wait = cpu_to_le32(wait); p->len = cpu_to_le16(data_len); memcpy(p->data, data, data_len); - return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID, + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, - const u8 *dst, - const u8 *data, u16 data_len) +int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, + u32 wait, const u8 *data, u16 data_len, + u32 no_cck) { struct sk_buff *skb; - struct wmi_p2p_probe_response_cmd *p; + struct wmi_send_mgmt_cmd *p; + u8 *buf; + + if (wait) + return -EINVAL; /* Offload for wait not supported */ + + buf = kmalloc(data_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len); + if (!skb) { + kfree(buf); + return -ENOMEM; + } + + kfree(wmi->last_mgmt_tx_frame); + memcpy(buf, data, data_len); + wmi->last_mgmt_tx_frame = buf; + wmi->last_mgmt_tx_frame_len = data_len; + + ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u " + "len=%u\n", id, freq, wait, data_len); + p = (struct wmi_send_mgmt_cmd *) skb->data; + p->id = cpu_to_le32(id); + p->freq = cpu_to_le32(freq); + p->wait = cpu_to_le32(wait); + p->no_cck = cpu_to_le32(no_cck); + p->len = cpu_to_le16(data_len); + memcpy(p->data, data, data_len); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID, + NO_SYNC_WMIFLAG); +} + +int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq, + const u8 *dst, const u8 *data, + u16 data_len) +{ + struct sk_buff *skb; + struct wmi_p2p_probe_response_cmd *p; + size_t cmd_len = sizeof(*p) + data_len; + + if (data_len == 0) + cmd_len++; /* work around target minimum length requirement */ + + skb = ath6kl_wmi_get_new_buf(cmd_len); if (!skb) return -ENOMEM; @@ -2764,11 +3176,12 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, memcpy(p->destination_addr, dst, ETH_ALEN); p->len = cpu_to_le16(data_len); memcpy(p->data, data, data_len); - 
return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID, + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SEND_PROBE_RESPONSE_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable) +int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable) { struct sk_buff *skb; struct wmi_probe_req_report_cmd *p; @@ -2781,11 +3194,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable) enable); p = (struct wmi_probe_req_report_cmd *) skb->data; p->enable = enable ? 1 : 0; - return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID, + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags) +int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags) { struct sk_buff *skb; struct wmi_get_p2p_info *p; @@ -2798,14 +3211,15 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags) info_req_flags); p = (struct wmi_get_p2p_info *) skb->data; p->info_req_flags = cpu_to_le32(info_req_flags); - return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID, + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi) +int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx) { ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n"); - return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID); + return ath6kl_wmi_simple_cmd(wmi, if_idx, + WMI_CANCEL_REMAIN_ON_CHNL_CMDID); } static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) @@ -2818,7 +3232,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) if (skb->len < sizeof(struct wmix_cmd_hdr)) { ath6kl_err("bad packet 1\n"); - wmi->stat.cmd_len_err++; return -EINVAL; } @@ -2840,7 +3253,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) break; default: ath6kl_warn("unknown cmd id 0x%x\n", id); - wmi->stat.cmd_id_err++; ret = -EINVAL; break; } @@ -2848,12 +3260,19 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) return ret; } +static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len) +{ + return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len); +} + /* Control Path */ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd; + struct ath6kl_vif *vif; u32 len; u16 id; + u8 if_idx; u8 *datap; int ret = 0; @@ -2863,12 +3282,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) if (skb->len < sizeof(struct wmi_cmd_hdr)) { ath6kl_err("bad packet 1\n"); dev_kfree_skb(skb); - wmi->stat.cmd_len_err++; return -EINVAL; } cmd = (struct wmi_cmd_hdr *) skb->data; id = le16_to_cpu(cmd->cmd_id); + if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK; skb_pull(skb, sizeof(struct wmi_cmd_hdr)); @@ -2879,6 +3298,15 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ", datap, len); + vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx); + if (!vif) { + ath6kl_dbg(ATH6KL_DBG_WMI, + "Wmi event for unavailable vif, vif_index:%d\n", + if_idx); + dev_kfree_skb(skb); + return -EINVAL; + } + switch (id) { case WMI_GET_BITRATE_CMDID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n"); @@ -2898,11 +3326,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_CONNECT_EVENTID: 
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n"); - ret = ath6kl_wmi_connect_event_rx(wmi, datap, len); + ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif); break; case WMI_DISCONNECT_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n"); - ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len); + ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif); break; case WMI_PEER_NODE_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n"); @@ -2910,11 +3338,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_TKIP_MICERR_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n"); - ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len); + ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif); break; case WMI_BSSINFO_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n"); - ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len); + ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif); break; case WMI_REGDOMAIN_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n"); @@ -2926,11 +3354,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_NEIGHBOR_REPORT_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n"); - ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len); + ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len, + vif); break; case WMI_SCAN_COMPLETE_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n"); - ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len); + ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif); break; case WMI_CMDERROR_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n"); @@ -2938,7 +3367,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_REPORT_STATISTICS_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n"); - ret = ath6kl_wmi_stats_event_rx(wmi, datap, len); + ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif); break; case WMI_RSSI_THRESHOLD_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n"); @@ -2953,6 +3382,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_REPORT_ROAM_TBL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n"); + ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len); break; case WMI_EXTENSION_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n"); @@ -2960,7 +3390,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_CAC_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n"); - ret = ath6kl_wmi_cac_event_rx(wmi, datap, len); + ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif); break; case WMI_CHANNEL_CHANGE_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n"); @@ -2996,7 +3426,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_GET_WOW_LIST_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n"); - ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len); break; case WMI_GET_PMKID_LIST_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n"); @@ -3004,25 +3433,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_PSPOLL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n"); - ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len); + ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif); break; case WMI_DTIMEXPIRY_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n"); - ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len); 
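These dispatch hunks show the receive half of the interface-index plumbing: ath6kl_wmi_control_rx() now pulls if_idx out of cmd->info1, resolves the vif (dropping events for an unavailable interface), and hands the vif to each per-interface event handler. A condensed userspace sketch of that flow follows, with a made-up event id and a flat array standing in for the driver's vif list.

#include <stdio.h>
#include <stdint.h>

#define CMD_HDR_IF_ID_MASK 0xF	/* low four bits of info1 carry the vif index */

struct vif { int up; const char *name; };

static int handle_connect(struct vif *vif, const uint8_t *data, int len)
{
	printf("CONNECT event on %s (%d bytes)\n", vif->name, len);
	return 0;
}

/* Route one event: recover the interface index from the command header,
 * find the matching vif, and drop the event if that interface is gone. */
static int control_rx(uint16_t info1, uint16_t evt_id, const uint8_t *data,
		      int len, struct vif *vifs, int nvifs)
{
	uint8_t if_idx = info1 & CMD_HDR_IF_ID_MASK;

	if (if_idx >= nvifs || !vifs[if_idx].up)
		return -1;			/* event for an unavailable vif */

	switch (evt_id) {
	case 0x1001:				/* made-up id standing in for WMI_CONNECT_EVENTID */
		return handle_connect(&vifs[if_idx], data, len);
	default:
		return -1;			/* unknown event */
	}
}

int main(void)
{
	struct vif vifs[2] = { { 1, "wlan0" }, { 0, "p2p0" } };
	uint8_t payload[4] = { 0 };

	control_rx(0x0000, 0x1001, payload, sizeof(payload), vifs, 2);	/* delivered */
	control_rx(0x0001, 0x1001, payload, sizeof(payload), vifs, 2);	/* dropped: p2p0 is down */
	return 0;
}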
+ ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif); break; case WMI_SET_PARAMS_REPLY_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n"); break; case WMI_ADDBA_REQ_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n"); - ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len); + ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif); break; case WMI_ADDBA_RESP_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n"); break; case WMI_DELBA_REQ_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n"); - ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len); + ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif); break; case WMI_REPORT_BTCOEX_CONFIG_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, @@ -3038,21 +3467,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_REMAIN_ON_CHNL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n"); - ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len); + ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif); break; case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n"); ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap, - len); + len, vif); break; case WMI_TX_STATUS_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n"); - ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len); + ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif); break; case WMI_RX_PROBE_REQ_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n"); - ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len); + ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif); break; case WMI_P2P_CAPABILITIES_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n"); @@ -3060,7 +3489,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_RX_ACTION_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n"); - ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len); + ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif); break; case WMI_P2P_INFO_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n"); @@ -3068,7 +3497,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; default: ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id); - wmi->stat.cmd_id_err++; ret = -EINVAL; break; } @@ -3078,11 +3506,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) return ret; } -static void ath6kl_wmi_qos_state_init(struct wmi *wmi) +void ath6kl_wmi_reset(struct wmi *wmi) { - if (!wmi) - return; - spin_lock_bh(&wmi->lock); wmi->fat_pipe_exist = 0; @@ -3103,16 +3528,9 @@ void *ath6kl_wmi_init(struct ath6kl *dev) wmi->parent_dev = dev; - ath6kl_wmi_qos_state_init(wmi); - wmi->pwr_mode = REC_POWER; - wmi->phy_mode = WMI_11G_MODE; - - wmi->pair_crypto_type = NONE_CRYPT; - wmi->grp_crypto_type = NONE_CRYPT; - wmi->ht_allowed[A_BAND_24GHZ] = 1; - wmi->ht_allowed[A_BAND_5GHZ] = 1; + ath6kl_wmi_reset(wmi); return wmi; } diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h index f8e644d54aa..42ac311eda4 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -93,11 +93,6 @@ struct sq_threshold_params { u8 last_rssi_poll_event; }; -struct wmi_stats { - u32 cmd_len_err; - u32 cmd_id_err; -}; - struct wmi_data_sync_bufs { u8 traffic_class; struct sk_buff *skb; @@ -111,32 +106,26 @@ struct wmi_data_sync_bufs { #define WMM_AC_VO 3 /* voice */ struct wmi { - bool ready; 
u16 stream_exist_for_ac[WMM_NUM_AC]; u8 fat_pipe_exist; struct ath6kl *parent_dev; - struct wmi_stats stat; u8 pwr_mode; - u8 phy_mode; - u8 keep_alive_intvl; spinlock_t lock; enum htc_endpoint_id ep_id; struct sq_threshold_params sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX]; - enum crypto_type pair_crypto_type; - enum crypto_type grp_crypto_type; bool is_wmm_enabled; - u8 ht_allowed[A_NUM_BANDS]; u8 traffic_class; bool is_probe_ssid; u8 *last_mgmt_tx_frame; size_t last_mgmt_tx_frame_len; + u8 saved_pwr_mode; }; struct host_app_area { - u32 wmi_protocol_ver; -}; + __le32 wmi_protocol_ver; +} __packed; enum wmi_msg_type { DATA_MSGTYPE = 0x0, @@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type { #define WMI_DATA_HDR_META_MASK 0x7 #define WMI_DATA_HDR_META_SHIFT 13 +#define WMI_DATA_HDR_IF_IDX_MASK 0xF + struct wmi_data_hdr { s8 rssi; @@ -208,6 +199,12 @@ struct wmi_data_hdr { * b15:b13 - META_DATA_VERSION 0 - 7 */ __le16 info2; + + /* + * usage of info3, 16-bit: + * b3:b0 - Interface index + * b15:b4 - Reserved + */ __le16 info3; } __packed; @@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr) WMI_DATA_HDR_META_MASK; } +static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr) +{ + return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK; +} + /* Tx meta version definitions */ #define WMI_MAX_TX_META_SZ 12 #define WMI_META_VERSION_1 0x01 @@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 { u8 csum_flags; } __packed; +#define WMI_CMD_HDR_IF_ID_MASK 0xF + /* Control Path */ struct wmi_cmd_hdr { __le16 cmd_id; @@ -312,6 +316,11 @@ struct wmi_cmd_hdr { __le16 reserved; } __packed; +static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr) +{ + return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK; +} + /* List of WMI commands */ enum wmi_cmd_id { WMI_CONNECT_CMDID = 0x0001, @@ -320,6 +329,10 @@ enum wmi_cmd_id { WMI_SYNCHRONIZE_CMDID, WMI_CREATE_PSTREAM_CMDID, WMI_DELETE_PSTREAM_CMDID, + /* WMI_START_SCAN_CMDID is to be deprecated. Use + * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt + * operations using station interface. + */ WMI_START_SCAN_CMDID, WMI_SET_SCAN_PARAMS_CMDID, WMI_SET_BSS_FILTER_CMDID, @@ -533,12 +546,61 @@ enum wmi_cmd_id { WMI_GTK_OFFLOAD_OP_CMDID, WMI_REMAIN_ON_CHNL_CMDID, WMI_CANCEL_REMAIN_ON_CHNL_CMDID, + /* WMI_SEND_ACTION_CMDID is to be deprecated. Use + * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt + * operations using station interface. 
+ */ WMI_SEND_ACTION_CMDID, WMI_PROBE_REQ_REPORT_CMDID, WMI_DISABLE_11B_RATES_CMDID, WMI_SEND_PROBE_RESPONSE_CMDID, WMI_GET_P2P_INFO_CMDID, WMI_AP_JOIN_BSS_CMDID, + + WMI_SMPS_ENABLE_CMDID, + WMI_SMPS_CONFIG_CMDID, + WMI_SET_RATECTRL_PARM_CMDID, + /* LPL specific commands*/ + WMI_LPL_FORCE_ENABLE_CMDID, + WMI_LPL_SET_POLICY_CMDID, + WMI_LPL_GET_POLICY_CMDID, + WMI_LPL_GET_HWSTATE_CMDID, + WMI_LPL_SET_PARAMS_CMDID, + WMI_LPL_GET_PARAMS_CMDID, + + WMI_SET_BUNDLE_PARAM_CMDID, + + /*GreenTx specific commands*/ + + WMI_GREENTX_PARAMS_CMDID, + + WMI_RTT_MEASREQ_CMDID, + WMI_RTT_CAPREQ_CMDID, + WMI_RTT_STATUSREQ_CMDID, + + /* WPS Commands */ + WMI_WPS_START_CMDID, + WMI_GET_WPS_STATUS_CMDID, + + /* More P2P commands */ + WMI_SET_NOA_CMDID, + WMI_GET_NOA_CMDID, + WMI_SET_OPPPS_CMDID, + WMI_GET_OPPPS_CMDID, + WMI_ADD_PORT_CMDID, + WMI_DEL_PORT_CMDID, + + /* 802.11w cmd */ + WMI_SET_RSN_CAP_CMDID, + WMI_GET_RSN_CAP_CMDID, + WMI_SET_IGTK_CMDID, + + WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID, + WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID, + + WMI_SEND_MGMT_CMDID, + WMI_BEGIN_SCAN_CMDID, + }; enum wmi_mgmt_frame_type { @@ -558,6 +620,14 @@ enum network_type { AP_NETWORK = 0x10, }; +enum network_subtype { + SUBTYPE_NONE, + SUBTYPE_BT, + SUBTYPE_P2PDEV, + SUBTYPE_P2PCLIENT, + SUBTYPE_P2PGO, +}; + enum dot11_auth_mode { OPEN_AUTH = 0x01, SHARED_AUTH = 0x02, @@ -576,9 +646,6 @@ enum auth_mode { WPA2_AUTH_CCKM = 0x40, }; -#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT -#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1) - #define WMI_MIN_KEY_INDEX 0 #define WMI_MAX_KEY_INDEX 3 @@ -617,6 +684,7 @@ enum wmi_connect_ctrl_flags_bits { CONNECT_CSA_FOLLOW_BSS = 0x0020, CONNECT_DO_WPA_OFFLOAD = 0x0040, CONNECT_DO_NOT_DEAUTH = 0x0080, + CONNECT_WPS_FLAG = 0x0100, }; struct wmi_connect_cmd { @@ -632,6 +700,7 @@ struct wmi_connect_cmd { __le16 ch; u8 bssid[ETH_ALEN]; __le32 ctrl_flags; + u8 nw_subtype; } __packed; /* WMI_RECONNECT_CMDID */ @@ -719,7 +788,12 @@ enum wmi_scan_type { WMI_SHORT_SCAN = 1, }; -struct wmi_start_scan_cmd { +struct wmi_supp_rates { + u8 nrates; + u8 rates[ATH6KL_RATE_MAXSIZE]; +}; + +struct wmi_begin_scan_cmd { __le32 force_fg_scan; /* for legacy cisco AP compatibility */ @@ -731,9 +805,15 @@ struct wmi_start_scan_cmd { /* time interval between scans (msec) */ __le32 force_scan_intvl; + /* no CCK rates */ + __le32 no_cck; + /* enum wmi_scan_type */ u8 scan_type; + /* Supported rates to advertise in the probe request frames */ + struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS]; + /* how many channels follow */ u8 num_ch; @@ -741,8 +821,31 @@ struct wmi_start_scan_cmd { __le16 ch_list[1]; } __packed; -/* WMI_SET_SCAN_PARAMS_CMDID */ -#define WMI_SHORTSCANRATIO_DEFAULT 3 +/* wmi_start_scan_cmd is to be deprecated. Use + * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt + * operations using station interface. 
+ */ +struct wmi_start_scan_cmd { + __le32 force_fg_scan; + + /* for legacy cisco AP compatibility */ + __le32 is_legacy; + + /* max duration in the home channel(msec) */ + __le32 home_dwell_time; + + /* time interval between scans (msec) */ + __le32 force_scan_intvl; + + /* enum wmi_scan_type */ + u8 scan_type; + + /* how many channels follow */ + u8 num_ch; + + /* channels in Mhz */ + __le16 ch_list[1]; +} __packed; /* * Warning: scan control flag value of 0xFF is used to disable @@ -776,13 +879,6 @@ enum wmi_scan_ctrl_flags_bits { ENABLE_SCAN_ABORT_EVENT = 0x40 }; -#define DEFAULT_SCAN_CTRL_FLAGS \ - (CONNECT_SCAN_CTRL_FLAGS | \ - SCAN_CONNECTED_CTRL_FLAGS | \ - ACTIVE_SCAN_CTRL_FLAGS | \ - ROAM_SCAN_CTRL_FLAGS | \ - ENABLE_AUTO_CTRL_FLAGS) - struct wmi_scan_params_cmd { /* sec */ __le16 fg_start_period; @@ -1365,14 +1461,20 @@ enum wmi_roam_ctrl { WMI_SET_LRSSI_SCAN_PARAMS, }; +enum wmi_roam_mode { + WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */ + WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */ + WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */ +}; + struct bss_bias { u8 bssid[ETH_ALEN]; - u8 bias; + s8 bias; } __packed; struct bss_bias_info { u8 num_bss; - struct bss_bias bss_bias[1]; + struct bss_bias bss_bias[0]; } __packed; struct low_rssi_scan_params { @@ -1385,10 +1487,11 @@ struct low_rssi_scan_params { struct roam_ctrl_cmd { union { - u8 bssid[ETH_ALEN]; - u8 roam_mode; - struct bss_bias_info bss; - struct low_rssi_scan_params params; + u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */ + u8 roam_mode; /* WMI_SET_ROAM_MODE */ + struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */ + struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS + */ } __packed info; u8 roam_ctrl; } __packed; @@ -1455,6 +1558,10 @@ struct wmi_tkip_micerr_event { u8 is_mcast; } __packed; +enum wmi_scan_status { + WMI_SCAN_STATUS_SUCCESS = 0, +}; + /* WMI_SCAN_COMPLETE_EVENTID */ struct wmi_scan_complete_event { a_sle32 status; @@ -1635,6 +1742,12 @@ struct wmi_bss_roam_info { u8 reserved; } __packed; +struct wmi_target_roam_tbl { + __le16 roam_mode; + __le16 num_entries; + struct wmi_bss_roam_info info[]; +} __packed; + /* WMI_CAC_EVENTID */ enum cac_indication { CAC_INDICATION_ADMISSION = 0x00, @@ -1771,7 +1884,6 @@ struct wmi_set_appie_cmd { #define WSC_REG_ACTIVE 1 #define WSC_REG_INACTIVE 0 -#define WOW_MAX_FILTER_LISTS 1 #define WOW_MAX_FILTERS_PER_LIST 4 #define WOW_PATTERN_SIZE 64 #define WOW_MASK_SIZE 64 @@ -1794,17 +1906,52 @@ struct wmi_set_ip_cmd { __le32 ips[MAX_IP_ADDRS]; } __packed; -/* WMI_GET_WOW_LIST_CMD reply */ -struct wmi_get_wow_list_reply { - /* number of patterns in reply */ - u8 num_filters; +enum ath6kl_wow_filters { + WOW_FILTER_SSID = BIT(1), + WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2), + WOW_FILTER_OPTION_EAP_REQ = BIT(3), + WOW_FILTER_OPTION_PATTERNS = BIT(4), + WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5), + WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6), + WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7), + WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8), + WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9), + WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10), + WOW_FILTER_OPTION_GTK_ERROR = BIT(11), + WOW_FILTER_OPTION_TEST_MODE = BIT(15), +}; + +enum ath6kl_host_mode { + ATH6KL_HOST_MODE_AWAKE, + ATH6KL_HOST_MODE_ASLEEP, +}; + +struct wmi_set_host_sleep_mode_cmd { + __le32 awake; + __le32 asleep; +} __packed; + +enum ath6kl_wow_mode { + ATH6KL_WOW_MODE_DISABLE, + ATH6KL_WOW_MODE_ENABLE, +}; + +struct wmi_set_wow_mode_cmd { + __le32 enable_wow; + __le32 filter; + __le16 host_req_delay; +} __packed; - /* this is 
filter # x of total num_filters */ - u8 this_filter_num; +struct wmi_add_wow_pattern_cmd { + u8 filter_list_id; + u8 filter_size; + u8 filter_offset; + u8 filter[0]; +} __packed; - u8 wow_mode; - u8 host_mode; - struct wow_filter wow_filters[1]; +struct wmi_del_wow_pattern_cmd { + __le16 filter_list_id; + __le16 filter_id; } __packed; /* WMI_SET_AKMP_PARAMS_CMD */ @@ -1905,7 +2052,7 @@ struct wmi_tx_complete_event { * !!! Warning !!! * -Changing the following values needs compilation of both driver and firmware */ -#define AP_MAX_NUM_STA 8 +#define AP_MAX_NUM_STA 10 /* Spl. AID used to set DTIM flag in the beacons */ #define MCAST_AID 0xFF @@ -1988,6 +2135,10 @@ struct wmi_remain_on_chnl_cmd { __le32 duration; } __packed; +/* wmi_send_action_cmd is to be deprecated. Use + * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt + * operations using station interface. + */ struct wmi_send_action_cmd { __le32 id; __le32 freq; @@ -1996,6 +2147,15 @@ struct wmi_send_action_cmd { u8 data[0]; } __packed; +struct wmi_send_mgmt_cmd { + __le32 id; + __le32 freq; + __le32 wait; + __le32 no_cck; + __le16 len; + u8 data[0]; +} __packed; + struct wmi_tx_status_event { __le32 id; u8 ack_status; @@ -2163,120 +2323,162 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb); int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, u8 msg_type, bool more_data, enum wmi_data_hdr_data_type data_type, - u8 meta_ver, void *tx_meta_info); + u8 meta_ver, void *tx_meta_info, u8 if_idx); int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb); int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb); -int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, - u32 layer2_priority, bool wmm_enabled, - u8 *ac); +int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, + struct sk_buff *skb, u32 layer2_priority, + bool wmm_enabled, u8 *ac); int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb); -int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, +int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag); -int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, +int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, + enum network_type nw_type, enum dot11_auth_mode dot11_auth_mode, enum auth_mode auth_mode, enum crypto_type pairwise_crypto, u8 pairwise_crypto_len, enum crypto_type group_crypto, u8 group_crypto_len, int ssid_len, u8 *ssid, - u8 *bssid, u16 channel, u32 ctrl_flags); - -int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel); -int ath6kl_wmi_disconnect_cmd(struct wmi *wmi); -int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, + u8 *bssid, u16 channel, u32 ctrl_flags, + u8 nw_subtype); + +int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, + u16 channel); +int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx); +int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, u32 force_fgscan, u32 is_legacy, u32 home_dwell_time, u32 force_scan_interval, s8 num_chan, u16 *ch_list); -int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, + +int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, + enum wmi_scan_type scan_type, + u32 force_fgscan, u32 is_legacy, + u32 home_dwell_time, u32 force_scan_interval, + s8 num_chan, u16 *ch_list, u32 no_cck, + u32 *rates); + +int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec, u16 
fg_end_sec, u16 bg_sec, u16 minact_chdw_msec, u16 maxact_chdw_msec, u16 pas_chdw_msec, u8 short_scan_ratio, u8 scan_ctrl_flag, u32 max_dfsch_act_time, u16 maxact_scan_per_ssid); -int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask); -int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, +int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, + u32 ie_mask); +int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, u8 ssid_len, u8 *ssid); -int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, +int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, + u16 listen_interval, u16 listen_beacons); -int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode); -int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, +int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode); +int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, u16 ps_poll_num, u16 dtim_policy, u16 tx_wakup_policy, u16 num_tx_to_wakeup, u16 ps_fail_event_policy); -int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout); -int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, +int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, struct wmi_create_pstream_cmd *pstream); -int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid); +int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, + u8 tsid); +int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout); int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold); -int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, +int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, u8 preamble_policy); int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source); int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config); -int ath6kl_wmi_get_stats_cmd(struct wmi *wmi); -int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, +int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx); +int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, enum crypto_type key_type, u8 key_usage, u8 key_len, - u8 *key_rsc, u8 *key_material, + u8 *key_rsc, unsigned int key_rsc_len, + u8 *key_material, u8 key_op_ctrl, u8 *mac_addr, enum wmi_sync_flag sync_flag); -int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk); -int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index); -int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, +int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk); +int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index); +int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, const u8 *pmkid, bool set); -int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM); -int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi); +int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM); +int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx); +int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi); -int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg); -int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl); +int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); +int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, + u8 keep_alive_intvl); int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); s32 ath6kl_wmi_get_rate(s8 rate_index); int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd); +int 
ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_host_mode host_mode); +int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, + enum ath6kl_wow_mode wow_mode, + u32 filter, u16 host_req_delay); +int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u8 list_id, u8 filter_size, + u8 filter_offset, u8 *filter, u8 *mask); +int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, + u16 list_id, u16 filter_id); int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); +int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); +int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode); /* AP mode */ -int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p); +int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, + struct wmi_connect_cmd *p); -int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason); +int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, + const u8 *mac, u16 reason); -int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag); +int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag); -int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version, +int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, + u8 rx_meta_version, bool rx_dot11_hdr, bool defrag_on_host); -int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, - u8 ie_len); +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, + const u8 *ie, u8 ie_len); /* P2P */ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); -int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur); +int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, + u32 dur); + +int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, + u32 wait, const u8 *data, u16 data_len); + +int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, + u32 wait, const u8 *data, u16 data_len, + u32 no_cck); -int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, - const u8 *data, u16 data_len); +int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq, + const u8 *dst, const u8 *data, + u16 data_len); -int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, - const u8 *dst, - const u8 *data, u16 data_len); +int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable); -int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable); +int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags); -int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags); +int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx); -int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi); +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, + const u8 *ie, u8 ie_len); -int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, - u8 ie_len); +void ath6kl_wmi_sscan_timer(unsigned long ptr); +struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx); void *ath6kl_wmi_init(struct ath6kl *devt); void ath6kl_wmi_shutdown(struct wmi *wmi); +void ath6kl_wmi_reset(struct wmi *wmi); #endif /* WMI_H */ diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index d9c08c619a3..dc6be4afe8e 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -2,6 +2,9 @@ 
config ATH9K_HW tristate config ATH9K_COMMON tristate +config ATH9K_DFS_DEBUGFS + def_bool y + depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED config ATH9K tristate "Atheros 802.11n wireless cards support" @@ -25,6 +28,7 @@ config ATH9K config ATH9K_PCI bool "Atheros ath9k PCI/PCIe bus support" + default y depends on ATH9K && PCI ---help--- This option enables the PCI bus support in ath9k. @@ -50,6 +54,25 @@ config ATH9K_DEBUGFS Also required for changing debug message flags at run time. +config ATH9K_DFS_CERTIFIED + bool "Atheros DFS support for certified platforms" + depends on ATH9K && EXPERT + default n + ---help--- + This option enables DFS support for initiating radiation on + ath9k. There is no way to dynamically detect if a card was DFS + certified and as such this is left as a build time option. This + option should only be enabled by system integrators that can + guarantee that all the platforms that their kernel will run on + have obtained appropriate regulatory body certification for a + respective Atheros card by using ath9k on the target shipping + platforms. + + This is currently only a placeholder for future DFS support, + as DFS support requires more components that still need to be + developed. At this point enabling this option won't do anything + except increase code size. + config ATH9K_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K @@ -58,6 +81,14 @@ config ATH9K_RATE_CONTROL Say Y, if you want to use the ath9k specific rate control module instead of minstrel_ht. +config ATH9K_BTCOEX_SUPPORT + bool "Atheros ath9k bluetooth coexistence support" + depends on ATH9K + default y + ---help--- + Say Y, if you want to use the ath9k radios together with + Bluetooth modules in the same system. + config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 36ed3c46fec..da02242499a 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -4,11 +4,14 @@ ath9k-y += beacon.o \ main.o \ recv.o \ xmit.o \ + mci.o \ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o ath9k-$(CONFIG_ATH9K_PCI) += pci.o ath9k-$(CONFIG_ATH9K_AHB) += ahb.o ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o +ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o +ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o obj-$(CONFIG_ATH9K) += ath9k.o @@ -33,7 +36,8 @@ ath9k_hw-y:= \ ar9002_mac.o \ ar9003_mac.o \ ar9003_eeprom.o \ - ar9003_paprd.o + ar9003_paprd.o \ + ar9003_mci.o obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index a639b94f764..bc56f57b393 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -136,8 +136,8 @@ static void ath9k_ani_restart(struct ath_hw *ah) cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high; } - ath_dbg(common, ATH_DBG_ANI, - "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base); + ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n", + ofdm_base, cck_base); ENABLE_REGWRITE_BUFFER(ah); @@ -268,8 +268,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) aniState->noiseFloor = BEACON_RSSI(ah); - ath_dbg(common, ATH_DBG_ANI, - "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", + ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->ofdmNoiseImmunityLevel, immunityLevel, aniState->noiseFloor, aniState->rssiThrLow, aniState->rssiThrHigh); @@ -336,8 
+335,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) const struct ani_cck_level_entry *entry_cck; aniState->noiseFloor = BEACON_RSSI(ah); - ath_dbg(common, ATH_DBG_ANI, - "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", + ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->cckNoiseImmunityLevel, immunityLevel, aniState->noiseFloor, aniState->rssiThrLow, aniState->rssiThrHigh); @@ -481,8 +479,7 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning) if (ah->opmode != NL80211_IFTYPE_STATION && ah->opmode != NL80211_IFTYPE_ADHOC) { - ath_dbg(common, ATH_DBG_ANI, - "Reset ANI state opmode %u\n", ah->opmode); + ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode); ah->stats.ast_ani_reset++; if (ah->opmode == NL80211_IFTYPE_AP) { @@ -582,7 +579,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) ATH9K_ANI_OFDM_DEF_LEVEL || aniState->cckNoiseImmunityLevel != ATH9K_ANI_CCK_DEF_LEVEL) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", ah->opmode, chan->channel, @@ -599,7 +596,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) /* * restore historical levels for this channel */ - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", ah->opmode, chan->channel, @@ -662,7 +659,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) { if (phyCnt1 < ofdm_base) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "phyCnt1 0x%x, resetting counter value to 0x%x\n", phyCnt1, ofdm_base); REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base); @@ -670,7 +667,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) AR_PHY_ERR_OFDM_TIMING); } if (phyCnt2 < cck_base) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "phyCnt2 0x%x, resetting counter value to 0x%x\n", phyCnt2, cck_base); REG_WRITE(ah, AR_PHY_ERR_2, cck_base); @@ -713,7 +710,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) cckPhyErrRate = aniState->cckPhyErrCount * 1000 / aniState->listenTime; - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n", aniState->listenTime, aniState->ofdmNoiseImmunityLevel, @@ -748,7 +745,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n"); + ath_dbg(common, ANI, "Enable MIB counters\n"); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); @@ -770,7 +767,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n"); + ath_dbg(common, ANI, "Disable MIB counters\n"); REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); @@ -845,7 +842,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); int i; - ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n"); + ath_dbg(common, ANI, "Initialize ANI\n"); if (use_new_ani(ah)) { ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW; diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index f199e9e2514..f901a17f76b 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ 
-158,7 +158,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq) /* pre-reverse this field */ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3); - ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n", + ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n", new_bias, synth_freq); /* swizzle rf_pwd_icsyndiv */ @@ -1053,8 +1053,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { - ath_dbg(common, ATH_DBG_ANI, - "level out of range (%u > %zu)\n", + ath_dbg(common, ANI, "level out of range (%u > %zu)\n", level, ARRAY_SIZE(ah->totalSizeDesired)); return false; } @@ -1157,8 +1156,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(firstep)) { - ath_dbg(common, ATH_DBG_ANI, - "level out of range (%u > %zu)\n", + ath_dbg(common, ANI, "level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep)); return false; } @@ -1177,8 +1175,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1)) { - ath_dbg(common, ATH_DBG_ANI, - "level out of range (%u > %zu)\n", + ath_dbg(common, ANI, "level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1)); return false; } @@ -1195,23 +1192,22 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, case ATH9K_ANI_PRESENT: break; default: - ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); + ath_dbg(common, ANI, "invalid cmd %u\n", cmd); return false; } - ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n"); - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ANI parameters:\n"); + ath_dbg(common, ANI, "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n", aniState->noiseImmunityLevel, aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff); - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n", aniState->cckWeakSigThreshold, aniState->firstepLevel, aniState->listenTime); - ath_dbg(common, ATH_DBG_ANI, - "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", + ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", aniState->ofdmPhyErrCount, aniState->cckPhyErrCount); @@ -1295,7 +1291,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: ofdm weak signal: %s=>%s\n", chan->channel, !aniState->ofdmWeakSigDetectOff ? 
@@ -1313,7 +1309,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(firstep_table)) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep_table)); return false; @@ -1350,7 +1346,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, AR_PHY_FIND_SIG_FIRSTEP_LOW, value2); if (level != aniState->firstepLevel) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -1358,7 +1354,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, ATH9K_ANI_FIRSTEP_LVL_NEW, value, aniState->iniDef.firstep); - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -1378,7 +1374,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1_table)) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1_table)); return false; @@ -1414,7 +1410,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2); if (level != aniState->spurImmunityLevel) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1422,7 +1418,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, value, aniState->iniDef.cycpwrThr1); - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1448,11 +1444,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, case ATH9K_ANI_PRESENT: break; default: - ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); + ath_dbg(common, ANI, "invalid cmd %u\n", cmd); return false; } - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff ? 
"on" : "off", @@ -1506,7 +1502,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) iniDef = &aniState->iniDef; - ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", + ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", ah->hw_version.macVersion, ah->hw_version.macRev, ah->opmode, diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 88279e325dc..c55e5bbafc4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -61,18 +61,16 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah, switch (currCal->calData->calType) { case IQ_MISMATCH_CAL: REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "starting IQ Mismatch Calibration\n"); break; case ADC_GAIN_CAL: REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN); - ath_dbg(common, ATH_DBG_CALIBRATE, - "starting ADC Gain Calibration\n"); + ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n"); break; case ADC_DC_CAL: REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER); - ath_dbg(common, ATH_DBG_CALIBRATE, - "starting ADC DC Calibration\n"); + ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n"); break; } @@ -129,7 +127,7 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); ah->totalIqCorrMeas[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + ath_dbg(ath9k_hw_common(ah), CALIBRATE, "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", ah->cal_samples, i, ah->totalPowerMeasI[i], ah->totalPowerMeasQ[i], @@ -151,7 +149,7 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah) ah->totalAdcQEvenPhase[i] += REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + ath_dbg(ath9k_hw_common(ah), CALIBRATE, "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n", ah->cal_samples, i, ah->totalAdcIOddPhase[i], @@ -175,7 +173,7 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah) ah->totalAdcDcOffsetQEvenPhase[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + ath_dbg(ath9k_hw_common(ah), CALIBRATE, "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n", ah->cal_samples, i, ah->totalAdcDcOffsetIOddPhase[i], @@ -198,12 +196,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) powerMeasQ = ah->totalPowerMeasQ[i]; iqCorrMeas = ah->totalIqCorrMeas[i]; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Starting IQ Cal and Correction for Chain %d\n", i); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Orignal: Chn %diq_corr_meas = 0x%08x\n", + ath_dbg(common, CALIBRATE, + "Original: Chn %d iq_corr_meas = 0x%08x\n", i, ah->totalIqCorrMeas[i]); iqCorrNeg = 0; @@ -213,12 +211,11 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) iqCorrNeg = 1; } - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); - ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", - iqCorrNeg); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n", + i, powerMeasI); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n", + i, powerMeasQ); + ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg); iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 
128; qCoffDenom = powerMeasQ / 64; @@ -227,13 +224,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) (qCoffDenom != 0)) { iCoff = iqCorrMeas / iCoffDenom; qCoff = powerMeasI / qCoffDenom - 64; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d iCoff = 0x%08x\n", i, iCoff); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d qCoff = 0x%08x\n", i, qCoff); + ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n", + i, iCoff); + ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n", + i, qCoff); iCoff = iCoff & 0x3f; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "New: Chn %d iCoff = 0x%08x\n", i, iCoff); if (iqCorrNeg == 0x0) iCoff = 0x40 - iCoff; @@ -243,7 +240,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) else if (qCoff <= -16) qCoff = -16; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", i, iCoff, qCoff); @@ -253,7 +250,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, qCoff); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "IQ Cal and Correction done for Chain %d\n", i); } @@ -275,21 +272,17 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) qOddMeasOffset = ah->totalAdcQOddPhase[i]; qEvenMeasOffset = ah->totalAdcQEvenPhase[i]; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Starting ADC Gain Cal for Chain %d\n", i); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_odd_i = 0x%08x\n", i, - iOddMeasOffset); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_even_i = 0x%08x\n", i, - iEvenMeasOffset); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_odd_q = 0x%08x\n", i, - qOddMeasOffset); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_even_q = 0x%08x\n", i, - qEvenMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n", + i, iOddMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n", + i, iEvenMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n", + i, qOddMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n", + i, qEvenMeasOffset); if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) { iGainMismatch = @@ -299,19 +292,19 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) ((qOddMeasOffset * 32) / qEvenMeasOffset) & 0x3f; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d gain_mismatch_i = 0x%08x\n", i, - iGainMismatch); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d gain_mismatch_q = 0x%08x\n", i, - qGainMismatch); + ath_dbg(common, CALIBRATE, + "Chn %d gain_mismatch_i = 0x%08x\n", + i, iGainMismatch); + ath_dbg(common, CALIBRATE, + "Chn %d gain_mismatch_q = 0x%08x\n", + i, qGainMismatch); val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); val &= 0xfffff000; val |= (qGainMismatch) | (iGainMismatch << 6); REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "ADC Gain Cal done for Chain %d\n", i); } } @@ -337,40 +330,36 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains) qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i]; qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i]; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Starting ADC DC Offset Cal for Chain %d\n", i); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_odd_i = %d\n", i, - iOddMeasOffset); - 
ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_even_i = %d\n", i, - iEvenMeasOffset); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_odd_q = %d\n", i, - qOddMeasOffset); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_even_q = %d\n", i, - qEvenMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n", + i, iOddMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n", + i, iEvenMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n", + i, qOddMeasOffset); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n", + i, qEvenMeasOffset); iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / numSamples) & 0x1ff; qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / numSamples) & 0x1ff; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, - iDcMismatch); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, - qDcMismatch); + ath_dbg(common, CALIBRATE, + "Chn %d dc_offset_mismatch_i = 0x%08x\n", + i, iDcMismatch); + ath_dbg(common, CALIBRATE, + "Chn %d dc_offset_mismatch_q = 0x%08x\n", + i, qDcMismatch); val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); val &= 0xc0000fff; val |= (qDcMismatch << 12) | (iDcMismatch << 21); REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "ADC DC Offset Cal done for Chain %d\n", i); } @@ -560,7 +549,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset) { 0x7838, 0 }, }; - ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n"); + ath_dbg(common, CALIBRATE, "Running PA Calibration\n"); /* PA CAL is not needed for high power solution */ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == @@ -741,7 +730,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -755,7 +744,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -851,7 +840,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -886,22 +875,21 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) { INIT_CAL(&ah->adcgain_caldata); INSERT_CAL(ah, &ah->adcgain_caldata); - ath_dbg(common, ATH_DBG_CALIBRATE, - "enabling ADC Gain Calibration.\n"); + ath_dbg(common, CALIBRATE, + "enabling ADC Gain Calibration\n"); } if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) { INIT_CAL(&ah->adcdc_caldata); INSERT_CAL(ah, &ah->adcdc_caldata); - ath_dbg(common, ATH_DBG_CALIBRATE, - "enabling ADC DC Calibration.\n"); + ath_dbg(common, CALIBRATE, + "enabling ADC DC Calibration\n"); 
} if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) { INIT_CAL(&ah->iq_caldata); INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, ATH_DBG_CALIBRATE, - "enabling IQ Calibration.\n"); + ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); } ah->cal_list_curr = ah->cal_list; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index b5920168606..7b6417b5212 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -107,7 +107,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) } if (isr & AR_ISR_RXORN) { - ath_dbg(common, ATH_DBG_INTERRUPT, + ath_dbg(common, INTERRUPT, "receive FIFO overrun interrupt\n"); } @@ -143,24 +143,24 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) if (fatal_int) { if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { - ath_dbg(common, ATH_DBG_ANY, + ath_dbg(common, ANY, "received PCI FATAL interrupt\n"); } if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { - ath_dbg(common, ATH_DBG_ANY, + ath_dbg(common, ANY, "received PCI PERR interrupt\n"); } *masked |= ATH9K_INT_FATAL; } if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { - ath_dbg(common, ATH_DBG_INTERRUPT, + ath_dbg(common, INTERRUPT, "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); REG_WRITE(ah, AR_RC, 0); *masked |= ATH9K_INT_FATAL; } if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { - ath_dbg(common, ATH_DBG_INTERRUPT, + ath_dbg(common, INTERRUPT, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 12a730dcb50..8e70f0bc073 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -18,6 +18,7 @@ #include "hw-ops.h" #include "ar9003_phy.h" #include "ar9003_rtt.h" +#include "ar9003_mci.h" #define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT #define MAX_MAG_DELTA 11 @@ -51,7 +52,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, currCal->calData->calCountMax); REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "starting IQ Mismatch Calibration\n"); /* Kick-off cal */ @@ -63,7 +64,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM, AR_PHY_65NM_CH0_THERM_START, 1); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "starting Temperature Compensation Calibration\n"); break; } @@ -193,7 +194,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); ah->totalIqCorrMeas[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + ath_dbg(ath9k_hw_common(ah), CALIBRATE, "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", ah->cal_samples, i, ah->totalPowerMeasI[i], ah->totalPowerMeasQ[i], @@ -220,12 +221,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) powerMeasQ = ah->totalPowerMeasQ[i]; iqCorrMeas = ah->totalIqCorrMeas[i]; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Starting IQ Cal and Correction for Chain %d\n", - i); + ath_dbg(common, CALIBRATE, + "Starting IQ Cal and Correction for Chain %d\n", i); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Orignal: Chn %diq_corr_meas = 0x%08x\n", + ath_dbg(common, CALIBRATE, + "Original: Chn %d iq_corr_meas = 0x%08x\n", i, ah->totalIqCorrMeas[i]); iqCorrNeg = 0; @@ -235,12 +235,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw 
*ah, u8 numChains) iqCorrNeg = 1; } - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); - ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", - iqCorrNeg); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n", + i, powerMeasI); + ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n", + i, powerMeasQ); + ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg); iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256; qCoffDenom = powerMeasQ / 64; @@ -248,10 +247,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) if ((iCoffDenom != 0) && (qCoffDenom != 0)) { iCoff = iqCorrMeas / iCoffDenom; qCoff = powerMeasI / qCoffDenom - 64; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d iCoff = 0x%08x\n", i, iCoff); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Chn %d qCoff = 0x%08x\n", i, qCoff); + ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n", + i, iCoff); + ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n", + i, qCoff); /* Force bounds on iCoff */ if (iCoff >= 63) @@ -272,10 +271,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) iCoff = iCoff & 0x7f; qCoff = qCoff & 0x7f; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", i, iCoff, qCoff); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Register offset (0x%04x) before update = 0x%x\n", offset_array[i], REG_READ(ah, offset_array[i])); @@ -286,25 +285,25 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) REG_RMW_FIELD(ah, offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, qCoff); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n", offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, REG_READ(ah, offset_array[i])); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n", offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, REG_READ(ah, offset_array[i])); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "IQ Cal and Correction done for Chain %d\n", i); } } REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0, AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). 
New Value 0x%08x\n", (unsigned) (AR_PHY_RX_IQCAL_CORR_B0), AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE, @@ -348,7 +347,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah, f2 = (f1 * f1 + f3 * f3) / result_shift; if (!f2) { - ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n"); + ath_dbg(common, CALIBRATE, "Divide by 0\n"); return false; } @@ -469,7 +468,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) || (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Divide by 0:\n" "a0_d0=%d\n" "a0_d1=%d\n" @@ -509,8 +508,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2); if ((mag1 == 0) || (mag2 == 0)) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Divide by 0: mag1=%d, mag2=%d\n", + ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n", mag1, mag2); return false; } @@ -528,8 +526,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, mag_a0_d0, phs_a0_d0, mag_a1_d0, phs_a1_d0, solved_eq)) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Call to ar9003_hw_solve_iq_cal() failed.\n"); + ath_dbg(common, CALIBRATE, + "Call to ar9003_hw_solve_iq_cal() failed\n"); return false; } @@ -538,12 +536,12 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, mag_rx = solved_eq[2]; phs_rx = solved_eq[3]; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "chain %d: mag mismatch=%d phase mismatch=%d\n", chain_idx, mag_tx/res_scale, phs_tx/res_scale); if (res_scale == mag_tx) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Divide by 0: mag_tx=%d, res_scale=%d\n", mag_tx, res_scale); return false; @@ -556,8 +554,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, q_q_coff = (mag_corr_tx * 128 / res_scale); q_i_coff = (phs_corr_tx * 256 / res_scale); - ath_dbg(common, ATH_DBG_CALIBRATE, - "tx chain %d: mag corr=%d phase corr=%d\n", + ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n", chain_idx, q_q_coff, q_i_coff); if (q_i_coff < -63) @@ -571,12 +568,11 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, iqc_coeff[0] = (q_q_coff * 128) + q_i_coff; - ath_dbg(common, ATH_DBG_CALIBRATE, - "tx chain %d: iq corr coeff=%x\n", + ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n", chain_idx, iqc_coeff[0]); if (-mag_rx == res_scale) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Divide by 0: mag_rx=%d, res_scale=%d\n", mag_rx, res_scale); return false; @@ -589,8 +585,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, q_q_coff = (mag_corr_rx * 128 / res_scale); q_i_coff = (phs_corr_rx * 256 / res_scale); - ath_dbg(common, ATH_DBG_CALIBRATE, - "rx chain %d: mag corr=%d phase corr=%d\n", + ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n", chain_idx, q_q_coff, q_i_coff); if (q_i_coff < -63) @@ -604,8 +599,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, iqc_coeff[1] = (q_q_coff * 128) + q_i_coff; - ath_dbg(common, ATH_DBG_CALIBRATE, - "rx chain %d: iq corr coeff=%x\n", + ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n", chain_idx, iqc_coeff[1]); return true; @@ -752,8 +746,7 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah) if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START, AR_PHY_TX_IQCAL_START_DO_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Tx IQ Cal is not completed.\n"); + ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n"); return false; } return true; @@ 
-791,13 +784,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) nmeasurement = MAX_MEASUREMENT; for (im = 0; im < nmeasurement; im++) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Doing Tx IQ Cal for chain %d.\n", i); + ath_dbg(common, CALIBRATE, + "Doing Tx IQ Cal for chain %d\n", i); if (REG_READ(ah, txiqcal_status[i]) & AR_PHY_TX_IQCAL_STATUS_FAILED) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Tx IQ Cal failed for chain %d.\n", i); + ath_dbg(common, CALIBRATE, + "Tx IQ Cal failed for chain %d\n", i); goto tx_iqcal_fail; } @@ -823,18 +816,16 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) iq_res[idx + 1] = 0xffff & REG_READ(ah, chan_info_tab[i] + offset); - ath_dbg(common, ATH_DBG_CALIBRATE, - "IQ RES[%d]=0x%x" - "IQ_RES[%d]=0x%x\n", + ath_dbg(common, CALIBRATE, + "IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n", idx, iq_res[idx], idx + 1, iq_res[idx + 1]); } if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, coeff.iqc_coeff)) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Failed in calculation of \ - IQ correction.\n"); + ath_dbg(common, CALIBRATE, + "Failed in calculation of IQ correction\n"); goto tx_iqcal_fail; } @@ -854,7 +845,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) return; tx_iqcal_fail: - ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n"); + ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n"); return; } @@ -934,10 +925,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_cal_data *caldata = ah->caldata; + struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; bool txiqcal_done = false, txclcal_done = false; bool is_reusable = true, status = true; bool run_rtt_cal = false, run_agc_cal; bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); + bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI); u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | AR_PHY_AGC_CONTROL_FLTR_CAL | AR_PHY_AGC_CONTROL_PKDET_CAL; @@ -950,7 +943,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, if (!ar9003_hw_rtt_restore(ah, chan)) run_rtt_cal = true; - ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n", + ath_dbg(common, CALIBRATE, "RTT restore %s\n", run_rtt_cal ? "failed" : "succeed"); } run_agc_cal = run_rtt_cal; @@ -1005,6 +998,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, } else if (caldata && !caldata->done_txiqcal_once) run_agc_cal = true; + if (mci && IS_CHAN_2GHZ(chan) && + (mci_hw->bt_state == MCI_BT_AWAKE) && + run_agc_cal && + !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) { + + u32 pld[4] = {0, 0, 0, 0}; + + /* send CAL_REQ only when BT is AWAKE. 
*/ + ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n", + mci_hw->wlan_cal_seq); + MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ); + pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++; + ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); + + /* Wait BT_CAL_GRANT for 50ms */ + ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n"); + + if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) + ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n"); + else { + is_reusable = false; + ath_dbg(common, MCI, "\nMCI BT is not responding\n"); + } + } + txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); udelay(5); @@ -1022,6 +1040,21 @@ skip_tx_iqcal: AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); } + + if (mci && IS_CHAN_2GHZ(chan) && + (mci_hw->bt_state == MCI_BT_AWAKE) && + run_agc_cal && + !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) { + + u32 pld[4] = {0, 0, 0, 0}; + + ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n", + mci_hw->wlan_cal_done); + MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE); + pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++; + ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); + } + if (rtt && !run_rtt_cal) { agc_ctrl |= agc_supp_cals; REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl); @@ -1031,9 +1064,8 @@ skip_tx_iqcal: if (run_rtt_cal) ar9003_hw_rtt_disable(ah); - ath_dbg(common, ATH_DBG_CALIBRATE, - "offset calibration failed to complete in 1ms;" - "noisy environment?\n"); + ath_dbg(common, CALIBRATE, + "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -1092,15 +1124,14 @@ skip_tx_iqcal: if (ah->supp_cals & IQ_MISMATCH_CAL) { INIT_CAL(&ah->iq_caldata); INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, ATH_DBG_CALIBRATE, - "enabling IQ Calibration.\n"); + ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); } if (ah->supp_cals & TEMP_COMP_CAL) { INIT_CAL(&ah->tempCompCalData); INSERT_CAL(ah, &ah->tempCompCalData); - ath_dbg(common, ATH_DBG_CALIBRATE, - "enabling Temperature Compensation Calibration.\n"); + ath_dbg(common, CALIBRATE, + "enabling Temperature Compensation Calibration\n"); } /* Initialize current pointer to first element in list */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 3b262ba6b17..9fbcbddea16 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -121,10 +121,8 @@ static const struct ar9300_eeprom ar9300_default = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {1, 1, 1},/* 3 chain */ - .db_stage2 = {1, 1, 1}, /* 3 chain */ - .db_stage3 = {0, 0, 0}, - .db_stage4 = {0, 0, 0}, + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -144,7 +142,7 @@ static const struct ar9300_eeprom ar9300_default = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@ -323,10 +321,8 @@ static const struct ar9300_eeprom ar9300_default = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {3, 3, 3}, /* 3 chain */ - .db_stage2 = {3, 3, 3}, /* 3 chain */ - .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ - .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 
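The block added to ar9003_hw_init_cal() negotiates the shared front end with Bluetooth before calibrating: send MCI_GPM_WLAN_CAL_REQ with a rolling sequence number, wait up to 50 ms for MCI_GPM_BT_CAL_GRANT (marking the result non-reusable if BT does not answer), run the calibration, then report MCI_GPM_WLAN_CAL_DONE. A hedged sketch of that ordering with stubbed message primitives (the stub names below are illustrations, not driver APIs):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's GPM send/wait primitives (stubs only). */
static bool send_gpm(const char *msg, unsigned int seq)
{
	printf("WLAN -> BT: %s (seq %u)\n", msg, seq);
	return true;
}

static bool wait_gpm(const char *msg, int timeout_us)
{
	printf("WLAN waits up to %d us for %s\n", timeout_us, msg);
	return true;		/* pretend BT granted the request */
}

static void run_agc_and_txiq_cal(void)
{
	printf("running AGC + Tx IQ calibration\n");
}

int main(void)
{
	static unsigned int wlan_cal_seq, wlan_cal_done;
	bool is_reusable = true;

	/* CAL_REQ is only sent when BT is awake and MCI cal is not disabled. */
	send_gpm("WLAN_CAL_REQ", wlan_cal_seq++);

	if (!wait_gpm("BT_CAL_GRANT", 50000))
		is_reusable = false;	/* BT not responding: don't reuse */

	run_agc_and_txiq_cal();

	/* Tell BT the shared radio front end is free again. */
	send_gpm("WLAN_CAL_DONE", wlan_cal_done++);

	printf("is_reusable = %d\n", is_reusable);
	return 0;
}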
0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -698,10 +694,8 @@ static const struct ar9300_eeprom ar9300_x113 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {1, 1, 1},/* 3 chain */ - .db_stage2 = {1, 1, 1}, /* 3 chain */ - .db_stage3 = {0, 0, 0}, - .db_stage4 = {0, 0, 0}, + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -721,7 +715,7 @@ static const struct ar9300_eeprom ar9300_x113 = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@ -900,10 +894,8 @@ static const struct ar9300_eeprom ar9300_x113 = { .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {3, 3, 3}, /* 3 chain */ - .db_stage2 = {3, 3, 3}, /* 3 chain */ - .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ - .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0xf, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1276,10 +1268,8 @@ static const struct ar9300_eeprom ar9300_h112 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {1, 1, 1},/* 3 chain */ - .db_stage2 = {1, 1, 1}, /* 3 chain */ - .db_stage3 = {0, 0, 0}, - .db_stage4 = {0, 0, 0}, + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1291,20 +1281,20 @@ static const struct ar9300_eeprom ar9300_h112 = { .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, - .papdRateMaskHt20 = LE32(0x80c080), - .papdRateMaskHt40 = LE32(0x80c080), + .papdRateMaskHt20 = LE32(0x0c80c080), + .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), - FREQ2FBIN(2472, 1), + FREQ2FBIN(2462, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { @@ -1314,7 +1304,7 @@ static const struct ar9300_eeprom ar9300_h112 = { }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), - FREQ2FBIN(2484, 1), + FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), @@ -1478,10 +1468,8 @@ static const struct ar9300_eeprom ar9300_h112 = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {3, 3, 3}, /* 3 chain */ - .db_stage2 = {3, 3, 3}, /* 3 chain */ - .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ - .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1515,7 +1503,7 @@ static const struct ar9300_eeprom ar9300_h112 = { FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), - FREQ2FBIN(5825, 0) + FREQ2FBIN(5785, 0) }, .calPierData5G = { { @@ -1854,10 +1842,8 @@ static const struct ar9300_eeprom ar9300_x112 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {1, 1, 1},/* 3 chain */ - .db_stage2 = {1, 1, 1}, /* 3 chain */ - .db_stage3 = {0, 0, 0}, - .db_stage4 = {0, 0, 0}, + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 
.quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1877,7 +1863,7 @@ static const struct ar9300_eeprom ar9300_x112 = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@ -2056,10 +2042,8 @@ static const struct ar9300_eeprom ar9300_x112 = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshch check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {3, 3, 3}, /* 3 chain */ - .db_stage2 = {3, 3, 3}, /* 3 chain */ - .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ - .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -2431,10 +2415,8 @@ static const struct ar9300_eeprom ar9300_h116 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {1, 1, 1},/* 3 chain */ - .db_stage2 = {1, 1, 1}, /* 3 chain */ - .db_stage3 = {0, 0, 0}, - .db_stage4 = {0, 0, 0}, + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -2454,12 +2436,12 @@ static const struct ar9300_eeprom ar9300_h116 = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), - FREQ2FBIN(2472, 1), + FREQ2FBIN(2462, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { @@ -2633,10 +2615,8 @@ static const struct ar9300_eeprom ar9300_h116 = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .ob = {3, 3, 3}, /* 3 chain */ - .db_stage2 = {3, 3, 3}, /* 3 chain */ - .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ - .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ + .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -2663,7 +2643,7 @@ static const struct ar9300_eeprom ar9300_h116 = { .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { - FREQ2FBIN(5180, 0), + FREQ2FBIN(5160, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), @@ -3023,6 +3003,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, return eep->modalHeader5G.antennaGain; case EEP_ANTENNA_GAIN_2G: return eep->modalHeader2G.antennaGain; + case EEP_QUICK_DROP: + return pBase->miscConfiguration & BIT(1); default: return 0; } @@ -3061,8 +3043,7 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer, int i; if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) { - ath_dbg(common, ATH_DBG_EEPROM, - "eeprom address not in range\n"); + ath_dbg(common, EEPROM, "eeprom address not in range\n"); return false; } @@ -3093,8 +3074,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer, return true; error: - ath_dbg(common, ATH_DBG_EEPROM, - "unable to read eeprom region at offset %d\n", address); + ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n", + address); return false; } @@ -3178,13 +3159,13 @@ static bool ar9300_uncompress_block(struct ath_hw *ah, length &= 0xff; if (length > 0 && spot >= 0 && spot+length <= mdataSize) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Restore at %d: spot=%d offset=%d length=%d\n", it, spot, offset, 
length); memcpy(&mptr[spot], &block[it+2], length); spot += length; } else if (length > 0) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Bad restore at %d: spot=%d offset=%d length=%d\n", it, spot, offset, length); return false; @@ -3206,13 +3187,13 @@ static int ar9300_compress_decision(struct ath_hw *ah, switch (code) { case _CompressNone: if (length != mdata_size) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "EEPROM structure size mismatch memory=%d eeprom=%d\n", mdata_size, length); return -1; } memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "restored eeprom %d: uncompressed, length %d\n", it, length); break; @@ -3221,22 +3202,21 @@ static int ar9300_compress_decision(struct ath_hw *ah, } else { eep = ar9003_eeprom_struct_find_by_id(reference); if (eep == NULL) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "can't find reference eeprom struct %d\n", reference); return -1; } memcpy(mptr, eep, mdata_size); } - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "restore eeprom %d: block, reference %d, length %d\n", it, reference, length); ar9300_uncompress_block(ah, mptr, mdata_size, (u8 *) (word + COMP_HDR_LEN), length); break; default: - ath_dbg(common, ATH_DBG_EEPROM, - "unknown compression code %d\n", code); + ath_dbg(common, EEPROM, "unknown compression code %d\n", code); return -1; } return 0; @@ -3312,34 +3292,32 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah, cptr = AR9300_BASE_ADDR_512; else cptr = AR9300_BASE_ADDR; - ath_dbg(common, ATH_DBG_EEPROM, - "Trying EEPROM access at Address 0x%04x\n", cptr); + ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n", + cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; cptr = AR9300_BASE_ADDR_512; - ath_dbg(common, ATH_DBG_EEPROM, - "Trying EEPROM access at Address 0x%04x\n", cptr); + ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n", + cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; read = ar9300_read_otp; cptr = AR9300_BASE_ADDR; - ath_dbg(common, ATH_DBG_EEPROM, - "Trying OTP access at Address 0x%04x\n", cptr); + ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; cptr = AR9300_BASE_ADDR_512; - ath_dbg(common, ATH_DBG_EEPROM, - "Trying OTP access at Address 0x%04x\n", cptr); + ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; goto fail; found: - ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n"); + ath_dbg(common, EEPROM, "Found valid EEPROM data\n"); for (it = 0; it < MSTATE; it++) { if (!read(ah, cptr, word, COMP_HDR_LEN)) @@ -3350,13 +3328,12 @@ found: ar9300_comp_hdr_unpack(word, &code, &reference, &length, &major, &minor); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n", cptr, code, reference, length, major, minor); if ((!AR_SREV_9485(ah) && length >= 1024) || (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Skipping bad header\n"); + ath_dbg(common, EEPROM, "Skipping bad header\n"); cptr -= COMP_HDR_LEN; continue; } @@ -3365,13 +3342,13 @@ found: read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN); checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length); mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]); - ath_dbg(common, 
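The restore loop in ar9300_uncompress_block() walks records of the form <skip><length><data...>, advancing a write cursor and rejecting records that would run past the shadow image. A standalone sketch of that record walk, with the record layout inferred from the visible memcpy and loop bookkeeping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Each record: skip 'offset' bytes, then copy 'length' bytes at the current
 * position, as logged by "Restore at %d: spot=%d offset=%d length=%d".
 * Standalone illustration, not the driver function itself.
 */
static bool uncompress_block(uint8_t *mptr, int mdata_size,
			     const uint8_t *block, int size)
{
	int it, spot = 0;

	for (it = 0; it < size; it += 2 + block[it + 1]) {
		int offset = block[it];
		int length = block[it + 1];

		spot += offset;

		if (length > 0 && spot >= 0 && spot + length <= mdata_size) {
			printf("Restore at %d: spot=%d offset=%d length=%d\n",
			       it, spot, offset, length);
			memcpy(&mptr[spot], &block[it + 2], length);
			spot += length;
		} else if (length > 0) {
			printf("Bad restore at %d: spot=%d offset=%d length=%d\n",
			       it, spot, offset, length);
			return false;
		}
	}
	return true;
}

int main(void)
{
	uint8_t image[16] = { 0 };
	/* one record: skip 3 bytes, then restore 2 bytes (0xaa 0xbb) */
	const uint8_t block[] = { 3, 2, 0xaa, 0xbb };

	if (uncompress_block(image, sizeof(image), block, sizeof(block)))
		printf("image[3]=0x%02x image[4]=0x%02x\n", image[3], image[4]);
	return 0;
}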
ATH_DBG_EEPROM, - "checksum %x %x\n", checksum, mchecksum); + ath_dbg(common, EEPROM, "checksum %x %x\n", + checksum, mchecksum); if (checksum == mchecksum) { ar9300_compress_decision(ah, it, code, reference, mptr, word, length, mdata_size); } else { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "skipping block with bad checksum\n"); } cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN); @@ -3428,25 +3405,14 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size, PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]); PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]); PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]); + PR_EEP("Quick Drop", modal_hdr->quick_drop); + PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff); PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl); PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart); PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn); PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn); PR_EEP("txClip", modal_hdr->txClip); PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize); - PR_EEP("Chain0 ob", modal_hdr->ob[0]); - PR_EEP("Chain1 ob", modal_hdr->ob[1]); - PR_EEP("Chain2 ob", modal_hdr->ob[2]); - - PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]); - PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]); - PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]); - PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]); - PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]); - PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]); - PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]); - PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]); - PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]); return len; } @@ -3503,6 +3469,7 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4))); PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5))); PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0))); + PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1))); PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1); PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio); PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio); @@ -3571,13 +3538,13 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz) static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - __le32 val; + __le16 val; if (is_2ghz) val = eep->modalHeader2G.switchcomspdt; else val = eep->modalHeader5G.switchcomspdt; - return le32_to_cpu(val); + return le16_to_cpu(val); } @@ -3965,6 +3932,40 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah) } } +static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq) +{ + struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP); + s32 t[3], f[3] = {5180, 5500, 5785}; + + if (!quick_drop) + return; + + if (freq < 4000) + quick_drop = eep->modalHeader2G.quick_drop; + else { + t[0] = eep->base_ext1.quick_drop_low; + t[1] = eep->modalHeader5G.quick_drop; + t[2] = eep->base_ext1.quick_drop_high; + quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); + } + REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); +} + +static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq) +{ + struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + u32 value; + + value = (freq < 4000) ? 
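ar9003_hw_quick_drop_apply() above takes the 2 GHz value straight from the modal header, but for 5 GHz it interpolates between quick_drop_low / quick_drop / quick_drop_high anchored at 5180, 5500 and 5785 MHz via ar9003_hw_power_interpolate(). A sketch of that interpolation; plain piecewise-linear behaviour with clamping at the ends is an assumption here, and the pier values are invented:

#include <stdio.h>

/* Piecewise-linear interpolation over sorted piers, clamped at both ends. */
static int interpolate(int x, const int *px, const int *py, int np)
{
	int i;

	if (x <= px[0])
		return py[0];
	if (x >= px[np - 1])
		return py[np - 1];

	for (i = 0; i < np - 1; i++) {
		if (x <= px[i + 1])
			return py[i] + (py[i + 1] - py[i]) * (x - px[i]) /
			       (px[i + 1] - px[i]);
	}
	return py[np - 1];
}

int main(void)
{
	const int f[3] = { 5180, 5500, 5785 };
	/* hypothetical quick_drop_low / 5G modal quick_drop / quick_drop_high */
	const int t[3] = { -8, -16, -24 };

	printf("quick_drop @ 5320 MHz = %d\n", interpolate(5320, f, t, 3));
	return 0;
}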
eep->modalHeader2G.txEndToXpaOff : + eep->modalHeader5G.txEndToXpaOff; + + REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL, + AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value); + REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL, + AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value); +} + static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -3972,10 +3973,12 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan)); ar9003_hw_drive_strength_apply(ah); ar9003_hw_atten_apply(ah, chan); + ar9003_hw_quick_drop_apply(ah, chan->channel); if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah)) ar9003_hw_internal_regulator_apply(ah); if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) ar9003_hw_apply_tuning_caps(ah); + ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel); } static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, @@ -4416,8 +4419,8 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, is2GHz) + ht40PowerIncForPdadc; for (i = 0; i < ar9300RateSize; i++) { - ath_dbg(common, ATH_DBG_EEPROM, - "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); + ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", + i, targetPowerValT2[i]); } } @@ -4436,7 +4439,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, struct ath_common *common = ath9k_hw_common(ah); if (ichain >= AR9300_MAX_CHAINS) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Invalid chain index, must be less than %d\n", AR9300_MAX_CHAINS); return -1; @@ -4444,7 +4447,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, if (mode) { /* 5GHz */ if (ipier >= AR9300_NUM_5G_CAL_PIERS) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Invalid 5GHz cal pier index, must be less than %d\n", AR9300_NUM_5G_CAL_PIERS); return -1; @@ -4454,7 +4457,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, is2GHz = 0; } else { if (ipier >= AR9300_NUM_2G_CAL_PIERS) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Invalid 2GHz cal pier index, must be less than %d\n", AR9300_NUM_2G_CAL_PIERS); return -1; @@ -4616,8 +4619,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) /* interpolate */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { - ath_dbg(common, ATH_DBG_EEPROM, - "ch=%d f=%d low=%d %d h=%d %d\n", + ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n", ichain, frequency, lfrequency[ichain], lcorrection[ichain], hfrequency[ichain], hcorrection[ichain]); @@ -4672,7 +4674,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) ar9003_hw_power_control_override(ah, frequency, correction, voltage, temperature); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "for frequency=%d, calibration correction = %d %d %d\n", frequency, correction[0], correction[1], correction[2]); @@ -4771,7 +4773,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; - u16 twiceMaxEdgePower = MAX_RATE_POWER; + u16 twiceMaxEdgePower; int i; u16 scaledPower = 0, minCtlPower; static const u16 ctlModesFor11a[] = { @@ -4858,7 +4860,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - ath_dbg(common, ATH_DBG_REGULATORY, + ath_dbg(common, REGULATORY, "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n", ctlMode, numCtlModes, isHt40CtlMode, (pCtlMode[ctlMode] & EXT_ADDITIVE)); @@ -4872,8 +4874,9 
@@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, ctlNum = AR9300_NUM_CTLS_5G; } + twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { - ath_dbg(common, ATH_DBG_REGULATORY, + ath_dbg(common, REGULATORY, "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n", i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], chan->channel); @@ -4915,7 +4918,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); - ath_dbg(common, ATH_DBG_REGULATORY, + ath_dbg(common, REGULATORY, "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, scaledPower, minCtlPower); @@ -5039,7 +5042,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, target_power_val_t2_eep[i]) > paprd_scale_factor)) { ah->paprd_ratemask &= ~(1 << i); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "paprd disabled for mcs %d\n", i); } } @@ -5051,12 +5054,14 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, regulatory->max_power_level = targetPowerValT2[i]; } + ath9k_hw_update_regulatory_maxpower(ah); + if (test) return; for (i = 0; i < ar9300RateSize; i++) { - ath_dbg(common, ATH_DBG_EEPROM, - "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); + ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", + i, targetPowerValT2[i]); } ah->txpower_limit = regulatory->max_power_level; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 6335a867527..bb223fe8281 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -216,10 +216,8 @@ struct ar9300_modal_eep_header { u8 spurChans[AR_EEPROM_MODAL_SPURS]; /* 3 Check if the register is per chain */ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS]; - u8 ob[AR9300_MAX_CHAINS]; - u8 db_stage2[AR9300_MAX_CHAINS]; - u8 db_stage3[AR9300_MAX_CHAINS]; - u8 db_stage4[AR9300_MAX_CHAINS]; + u8 reserved[11]; + int8_t quick_drop; u8 xpaBiasLvl; u8 txFrameToDataStart; u8 txFrameToPaOn; @@ -269,7 +267,9 @@ struct cal_ctl_data_5g { struct ar9300_BaseExtension_1 { u8 ant_div_control; - u8 future[13]; + u8 future[11]; + int8_t quick_drop_low; + int8_t quick_drop_high; } __packed; struct ar9300_BaseExtension_2 { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index ccde784a842..88c81c5706b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -175,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) u32 isr = 0; u32 mask2 = 0; struct ath9k_hw_capabilities *pCap = &ah->caps; - u32 sync_cause = 0; struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 sync_cause = 0, async_cause; - if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { + async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); + + if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_ON) isr = REG_READ(ah, AR_ISR); } + sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT; *masked = 0; - if (!isr && !sync_cause) + if (!isr && !sync_cause && !async_cause) return false; if (isr) { @@ -294,6 +298,33 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) ar9003_hw_bb_watchdog_read(ah); } + if (async_cause & AR_INTR_ASYNC_MASK_MCI) { + u32 
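One subtle fix in ar9003_hw_set_power_per_rate_table() above: twiceMaxEdgePower is now re-armed to MAX_RATE_POWER at the top of each CTL-mode pass instead of once at declaration, so a restrictive edge power found for one mode no longer leaks into the next before min(twiceMaxEdgePower, scaledPower) is taken. A toy illustration (the conformance-table numbers and the way the edge power is derived are made up):

#include <stdio.h>

#define MAX_RATE_POWER 63

static int min_pwr(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Per-mode CTL edge powers; numbers are fake example data. */
	const int edge[2][3] = {
		{ 30, 20, 25 },
		{ 60, 58, 55 },
	};
	int scaled_power = 40;
	int mode;

	for (mode = 0; mode < 2; mode++) {
		/* Re-armed for every CTL mode, as in the fixed driver code. */
		int twice_max_edge_power = MAX_RATE_POWER;
		int i;

		for (i = 0; i < 3; i++)
			twice_max_edge_power =
				min_pwr(twice_max_edge_power, edge[mode][i]);

		printf("mode %d: minCtlPower = %d\n",
		       mode, min_pwr(twice_max_edge_power, scaled_power));
	}
	return 0;
}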
raw_intr, rx_msg_intr; + + rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW); + raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW); + + if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef)) + ath_dbg(common, MCI, + "MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n", + raw_intr, rx_msg_intr, mci->raw_intr, + mci->rx_msg_intr); + else { + mci->rx_msg_intr |= rx_msg_intr; + mci->raw_intr |= raw_intr; + *masked |= ATH9K_INT_MCI; + + if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) + mci->cont_status = + REG_READ(ah, AR_MCI_CONT_STATUS); + + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr); + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr); + ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n"); + + } + } + if (sync_cause) { if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); @@ -302,7 +333,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) } if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) - ath_dbg(common, ATH_DBG_INTERRUPT, + ath_dbg(common, INTERRUPT, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); @@ -333,7 +364,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) || (MS(ads->ds_info, AR_TxRxDesc) != 1)) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, + ath_dbg(ath9k_hw_common(ah), XMIT, "Tx Descriptor error %x\n", ads->ds_info); memset(ads, 0, sizeof(*ads)); return -EIO; @@ -541,7 +572,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah) memset((void *) ah->ts_ring, 0, ah->ts_size * sizeof(struct ar9003_txs)); - ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, + ath_dbg(ath9k_hw_common(ah), XMIT, "TS Start 0x%x End 0x%x Virt %p, Size %d\n", ah->ts_paddr_start, ah->ts_paddr_end, ah->ts_ring, ah->ts_size); @@ -552,7 +583,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah) void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start, u32 ts_paddr_start, - u8 size) + u16 size) { ah->ts_paddr_start = ts_paddr_start; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h index c50449387bf..e203b51e968 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h @@ -118,5 +118,5 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah); void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start, u32 ts_paddr_start, - u8 size); + u16 size); #endif diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c new file mode 100644 index 00000000000..709520c6835 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -0,0 +1,1493 @@ +/* + * Copyright (c) 2008-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
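The new MCI branch in ar9003_hw_get_isr() above reads the raw and RX-message cause registers, treats 0xdeadbeef as a vanished device, otherwise ORs the causes into the software copies, reports ATH9K_INT_MCI to the caller and acks by writing the read values back. A compact userspace sketch with stub registers (register names, semantics and the interrupt bit value are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub register file standing in for the MCI interrupt registers. */
static uint32_t reg_raw = 0x00000010;        /* pretend one raw cause is set    */
static uint32_t reg_rx_msg_raw = 0x00000200; /* pretend one rx-msg cause is set */

static uint32_t reg_read(uint32_t *r)          { return *r; }
static void reg_write(uint32_t *r, uint32_t v) { *r &= ~v; } /* write-1-to-clear */

struct mci_state {
	uint32_t raw_intr;
	uint32_t rx_msg_intr;
};

static bool handle_mci_irq(struct mci_state *mci, uint32_t *masked)
{
	uint32_t raw_intr = reg_read(&reg_raw);
	uint32_t rx_msg_intr = reg_read(&reg_rx_msg_raw);

	if (raw_intr == 0xdeadbeef || rx_msg_intr == 0xdeadbeef)
		return false;	/* device removed / bus dead: ack nothing */

	/* Accumulate causes for the bottom half, flag MCI work to the caller. */
	mci->raw_intr |= raw_intr;
	mci->rx_msg_intr |= rx_msg_intr;
	*masked |= 0x00800000;	/* stand-in for ATH9K_INT_MCI */

	/* Ack exactly what was read. */
	reg_write(&reg_rx_msg_raw, rx_msg_intr);
	reg_write(&reg_raw, raw_intr);
	return true;
}

int main(void)
{
	struct mci_state mci = { 0, 0 };
	uint32_t masked = 0;

	handle_mci_irq(&mci, &masked);
	printf("raw=0x%08x rx_msg=0x%08x masked=0x%08x\n",
	       mci.raw_intr, mci.rx_msg_intr, masked);
	return 0;
}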
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/export.h> +#include "hw.h" +#include "ar9003_phy.h" +#include "ar9003_mci.h" + +static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah) +{ + if (!AR_SREV_9462_20(ah)) + return; + + REG_RMW_FIELD(ah, AR_MCI_COMMAND2, + AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1); + udelay(1); + REG_RMW_FIELD(ah, AR_MCI_COMMAND2, + AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0); +} + +static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address, + u32 bit_position, int time_out) +{ + struct ath_common *common = ath9k_hw_common(ah); + + while (time_out) { + + if (REG_READ(ah, address) & bit_position) { + + REG_WRITE(ah, address, bit_position); + + if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) { + + if (bit_position & + AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) + ar9003_mci_reset_req_wakeup(ah); + + if (bit_position & + (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | + AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, + AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE); + + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, + AR_MCI_INTERRUPT_RX_MSG); + } + break; + } + + udelay(10); + time_out -= 10; + + if (time_out < 0) + break; + } + + if (time_out <= 0) { + ath_dbg(common, MCI, + "MCI Wait for Reg 0x%08x = 0x%08x timeout\n", + address, bit_position); + ath_dbg(common, MCI, + "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n", + REG_READ(ah, AR_MCI_INTERRUPT_RAW), + REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW)); + time_out = 0; + } + + return time_out; +} + +void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done) +{ + u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00}; + + if (!ATH9K_HW_CAP_MCI) + return; + + ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16, + wait_done, false); + udelay(5); +} + +void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done) +{ + u32 payload = 0x00000000; + + if (!ATH9K_HW_CAP_MCI) + return; + + ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1, + wait_done, false); +} + +static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done) +{ + ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP, + NULL, 0, wait_done, false); + udelay(5); +} + +void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done) +{ + if (!ATH9K_HW_CAP_MCI) + return; + + ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP, + NULL, 0, wait_done, false); +} + +static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done) +{ + u32 payload = 0x70000000; + + ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1, + wait_done, false); +} + +static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done) +{ + ar9003_mci_send_message(ah, MCI_SYS_SLEEPING, + MCI_FLAG_DISABLE_TIMESTAMP, + NULL, 0, wait_done, false); +} + +static void ar9003_mci_send_coex_version_query(struct ath_hw *ah, + bool wait_done) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 payload[4] = {0, 0, 0, 0}; + + if (!mci->bt_version_known && + (mci->bt_state != MCI_BT_SLEEP)) { + ath_dbg(common, MCI, "MCI Send Coex version query\n"); + MCI_GPM_SET_TYPE_OPCODE(payload, + MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY); + 
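ar9003_mci_wait_for_interrupt() above is a plain poll-with-budget loop: check the bit, sleep 10 us, subtract 10 from the budget, and return the remaining budget so callers can distinguish timeout (0) from success. A minimal sketch of the same pattern, with userspace usleep in place of udelay and a fake status register:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Fake status register that reports the bit after a few polls (stub only). */
static int polls;
static uint32_t fake_status_read(void)
{
	return (++polls >= 3) ? 0x4 : 0x0;
}

static int wait_for_bit(uint32_t bit, int time_out_us)
{
	while (time_out_us > 0) {
		if (fake_status_read() & bit)
			return time_out_us;	/* bit seen, budget left */

		usleep(10);			/* udelay(10) in the driver */
		time_out_us -= 10;
	}
	return 0;				/* timed out */
}

int main(void)
{
	int left = wait_for_bit(0x4, 500);

	if (left)
		printf("bit set, %d us of budget left\n", left);
	else
		printf("timeout\n");
	return 0;
}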
ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, + wait_done, true); + } +} + +static void ar9003_mci_send_coex_version_response(struct ath_hw *ah, + bool wait_done) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 payload[4] = {0, 0, 0, 0}; + + ath_dbg(common, MCI, "MCI Send Coex version response\n"); + MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, + MCI_GPM_COEX_VERSION_RESPONSE); + *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) = + mci->wlan_ver_major; + *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) = + mci->wlan_ver_minor; + ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); +} + +static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah, + bool wait_done) +{ + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 *payload = &mci->wlan_channels[0]; + + if ((mci->wlan_channels_update == true) && + (mci->bt_state != MCI_BT_SLEEP)) { + MCI_GPM_SET_TYPE_OPCODE(payload, + MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS); + ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, + wait_done, true); + MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff); + } +} + +static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah, + bool wait_done, u8 query_type) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 payload[4] = {0, 0, 0, 0}; + bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO | + MCI_GPM_COEX_QUERY_BT_TOPOLOGY)); + + if (mci->bt_state != MCI_BT_SLEEP) { + + ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n", + query_type); + + MCI_GPM_SET_TYPE_OPCODE(payload, + MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY); + + *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type; + /* + * If bt_status_query message is not sent successfully, + * then need_flush_btinfo should be set again. + */ + if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, + wait_done, true)) { + if (query_btinfo) { + mci->need_flush_btinfo = true; + + ath_dbg(common, MCI, + "MCI send bt_status_query fail, set flush flag again\n"); + } + } + + if (query_btinfo) + mci->query_bt = false; + } +} + +void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, + bool wait_done) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 payload[4] = {0, 0, 0, 0}; + + if (!ATH9K_HW_CAP_MCI) + return; + + ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n", + (halt) ? 
"halt" : "unhalt"); + + MCI_GPM_SET_TYPE_OPCODE(payload, + MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM); + + if (halt) { + mci->query_bt = true; + /* Send next unhalt no matter halt sent or not */ + mci->unhalt_bt_gpm = true; + mci->need_flush_btinfo = true; + *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = + MCI_GPM_COEX_BT_GPM_HALT; + } else + *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = + MCI_GPM_COEX_BT_GPM_UNHALT; + + ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); +} + + +static void ar9003_mci_prep_interface(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 saved_mci_int_en; + u32 mci_timeout = 150; + + mci->bt_state = MCI_BT_SLEEP; + saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN); + + REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW)); + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, + REG_READ(ah, AR_MCI_INTERRUPT_RAW)); + + /* Remote Reset */ + ath_dbg(common, MCI, "MCI Reset sequence start\n"); + ath_dbg(common, MCI, "MCI send REMOTE_RESET\n"); + ar9003_mci_remote_reset(ah, true); + + /* + * This delay is required for the reset delay worst case value 255 in + * MCI_COMMAND2 register + */ + + if (AR_SREV_9462_10(ah)) + udelay(252); + + ath_dbg(common, MCI, "MCI Send REQ_WAKE to remoter(BT)\n"); + ar9003_mci_send_req_wake(ah, true); + + if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) { + + ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n"); + mci->bt_state = MCI_BT_AWAKE; + + if (AR_SREV_9462_10(ah)) + udelay(10); + /* + * we don't need to send more remote_reset at this moment. + * If BT receive first remote_reset, then BT HW will + * be cleaned up and will be able to receive req_wake + * and BT HW will respond sys_waking. + * In this case, WLAN will receive BT's HW sys_waking. + * Otherwise, if BT SW missed initial remote_reset, + * that remote_reset will still clean up BT MCI RX, + * and the req_wake will wake BT up, + * and BT SW will respond this req_wake with a remote_reset and + * sys_waking. In this case, WLAN will receive BT's SW + * sys_waking. In either case, BT's RX is cleaned up. So we + * don't need to reply BT's remote_reset now, if any. + * Similarly, if in any case, WLAN can receive BT's sys_waking, + * that means WLAN's RX is also fine. + */ + + /* Send SYS_WAKING to BT */ + + ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n"); + + ar9003_mci_send_sys_waking(ah, true); + udelay(10); + + /* + * Set BT priority interrupt value to be 0xff to + * avoid having too many BT PRIORITY interrupts. + */ + + REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF); + REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF); + REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF); + REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF); + REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF); + + /* + * A contention reset will be received after send out + * sys_waking. Also BT priority interrupt bits will be set. + * Clear those bits before the next step. 
+ */ + + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_CONT_RST); + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, + AR_MCI_INTERRUPT_BT_PRI); + + if (AR_SREV_9462_10(ah) || mci->is_2g) { + /* Send LNA_TRANS */ + ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n"); + ar9003_mci_send_lna_transfer(ah, true); + udelay(5); + } + + if (AR_SREV_9462_10(ah) || (mci->is_2g && + !mci->update_2g5g)) { + if (ar9003_mci_wait_for_interrupt(ah, + AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_LNA_INFO, + mci_timeout)) + ath_dbg(common, MCI, + "MCI WLAN has control over the LNA & BT obeys it\n"); + else + ath_dbg(common, MCI, + "MCI BT didn't respond to LNA_TRANS\n"); + } + + if (AR_SREV_9462_10(ah)) { + /* Send another remote_reset to deassert BT clk_req. */ + ath_dbg(common, MCI, + "MCI another remote_reset to deassert clk_req\n"); + ar9003_mci_remote_reset(ah, true); + udelay(252); + } + } + + /* Clear the extra redundant SYS_WAKING from BT */ + if ((mci->bt_state == MCI_BT_AWAKE) && + (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) && + (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) { + + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING); + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, + AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE); + } + + REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en); +} + +void ar9003_mci_disable_interrupt(struct ath_hw *ah) +{ + if (!ATH9K_HW_CAP_MCI) + return; + + REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0); +} + +void ar9003_mci_enable_interrupt(struct ath_hw *ah) +{ + if (!ATH9K_HW_CAP_MCI) + return; + + REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT); + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, + AR_MCI_INTERRUPT_RX_MSG_DEFAULT); +} + +bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints) +{ + u32 intr; + + if (!ATH9K_HW_CAP_MCI) + return false; + + intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW); + return ((intr & ints) == ints); +} + +void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, + u32 *rx_msg_intr) +{ + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + + if (!ATH9K_HW_CAP_MCI) + return; + + *raw_intr = mci->raw_intr; + *rx_msg_intr = mci->rx_msg_intr; + + /* Clean int bits after the values are read. 
*/ + mci->raw_intr = 0; + mci->rx_msg_intr = 0; +} +EXPORT_SYMBOL(ar9003_mci_get_interrupt); + +void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g) +{ + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + + if (!ATH9K_HW_CAP_MCI) + return; + + if (!mci->update_2g5g && + (mci->is_2g != is_2g)) + mci->update_2g5g = true; + + mci->is_2g = is_2g; +} + +static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 *payload; + u32 recv_type, offset; + + if (msg_index == MCI_GPM_INVALID) + return false; + + offset = msg_index << 4; + + payload = (u32 *)(mci->gpm_buf + offset); + recv_type = MCI_GPM_TYPE(payload); + + if (recv_type == MCI_GPM_RSVD_PATTERN) { + ath_dbg(common, MCI, "MCI Skip RSVD GPM\n"); + return false; + } + + return true; +} + +static void ar9003_mci_observation_set_up(struct ath_hw *ah) +{ + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) { + + ath9k_hw_cfg_output(ah, 3, + AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA); + ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK); + ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); + ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); + + } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) { + + ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX); + ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX); + ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); + ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); + ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + + } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) { + + ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); + ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); + ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); + ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); + + } else + return; + + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); + + if (AR_SREV_9462_20_OR_LATER(ah)) { + REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, + AR_GLB_DS_JTAG_DISABLE, 1); + REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, + AR_GLB_WLAN_UART_INTF_EN, 0); + REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL, + ATH_MCI_CONFIG_MCI_OBS_GPIO); + } + + REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0); + REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1); + REG_WRITE(ah, AR_OBS, 0x4b); + REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03); + REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01); + REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02); + REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03); + REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, + AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07); +} + +static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done, + u8 opcode, u32 bt_flags) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 pld[4] = {0, 0, 0, 0}; + + MCI_GPM_SET_TYPE_OPCODE(pld, + MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS); + + *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode; + *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF; + *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF; + *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF; + *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF; + + ath_dbg(common, MCI, + "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n", + 
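GPM payloads are 16-byte buffers built byte-wise, as in ar9003_mci_send_coex_bt_flags() above: a type/opcode pair plus the 32-bit BT flags stored least-significant byte first. A sketch of that packing; the byte offsets and the agent-type value below are placeholders, not the driver's MCI_GPM_COEX_* constants:

#include <stdint.h>
#include <stdio.h>

#define GPM_B_TYPE	0	/* assumed offset of the GPM type byte   */
#define GPM_B_OPCODE	1	/* assumed offset of the opcode byte     */
#define GPM_W_BT_FLAGS	4	/* assumed offset of the bt_flags word   */

static void build_bt_flags_gpm(uint32_t *pld, uint8_t opcode, uint32_t bt_flags)
{
	uint8_t *p = (uint8_t *)pld;

	p[GPM_B_TYPE] = 0x0a;		/* stand-in for MCI_GPM_COEX_AGENT */
	p[GPM_B_OPCODE] = opcode;
	/* 32-bit flags word, least-significant byte first. */
	p[GPM_W_BT_FLAGS + 0] = bt_flags & 0xff;
	p[GPM_W_BT_FLAGS + 1] = (bt_flags >> 8) & 0xff;
	p[GPM_W_BT_FLAGS + 2] = (bt_flags >> 16) & 0xff;
	p[GPM_W_BT_FLAGS + 3] = (bt_flags >> 24) & 0xff;
}

int main(void)
{
	uint32_t pld[4] = { 0, 0, 0, 0 };
	int i;

	build_bt_flags_gpm(pld, 1 /* SET */, 0x12345678);
	for (i = 0; i < 16; i++)
		printf("%02x ", ((uint8_t *)pld)[i]);
	printf("\n");
	return 0;
}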
opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" : + opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR", + bt_flags); + + return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, + wait_done, true); +} + +void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, + bool is_full_sleep) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 regval, thresh; + + if (!ATH9K_HW_CAP_MCI) + return; + + ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n", + is_full_sleep, is_2g); + + /* + * GPM buffer and scheduling message buffer are not allocated + */ + + if (!mci->gpm_addr && !mci->sched_addr) { + ath_dbg(common, MCI, + "MCI GPM and schedule buffers are not allocated\n"); + return; + } + + if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) { + ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n"); + return; + } + + /* Program MCI DMA related registers */ + REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr); + REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len); + REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr); + + /* + * To avoid MCI state machine be affected by incoming remote MCI msgs, + * MCI mode will be enabled later, right before reset the MCI TX and RX. + */ + + regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | + SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | + SM(1, AR_BTCOEX_CTRL_PA_SHARED) | + SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | + SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | + SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | + SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + + if (is_2g && (AR_SREV_9462_20(ah)) && + !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) { + + regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + ath_dbg(common, MCI, "MCI sched one step look ahead\n"); + + if (!(mci->config & + ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) { + + thresh = MS(mci->config, + ATH_MCI_CONFIG_AGGR_THRESH); + thresh &= 7; + regval |= SM(1, + AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN); + regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH); + + REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, + AR_MCI_SCHD_TABLE_2_HW_BASED, 1); + REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, + AR_MCI_SCHD_TABLE_2_MEM_BASED, 1); + + } else + ath_dbg(common, MCI, "MCI sched aggr thresh: off\n"); + } else + ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n"); + + if (AR_SREV_9462_10(ah)) + regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10); + + REG_WRITE(ah, AR_BTCOEX_CTRL, regval); + + if (AR_SREV_9462_20(ah)) { + REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, + AR_BTCOEX_CTRL_SPDT_ENABLE); + REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3, + AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20); + } + + REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1); + REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); + + thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV); + REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh); + REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN); + + /* Resetting the Rx and Tx paths of MCI */ + regval = REG_READ(ah, AR_MCI_COMMAND2); + regval |= SM(1, AR_MCI_COMMAND2_RESET_TX); + REG_WRITE(ah, AR_MCI_COMMAND2, regval); + + udelay(1); + + regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX); + REG_WRITE(ah, AR_MCI_COMMAND2, regval); + + if (is_full_sleep) { + ar9003_mci_mute_bt(ah); + udelay(100); + } + + regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); + REG_WRITE(ah, AR_MCI_COMMAND2, regval); + udelay(1); + regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); + REG_WRITE(ah, AR_MCI_COMMAND2, regval); + + ar9003_mci_state(ah, 
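The AR_BTCOEX_CTRL value programmed in ar9003_mci_reset() above is assembled field by field with the driver's SM() shift-and-mask macro. A generic sketch of that style of register composition; the shifts, masks and field meanings below are invented for illustration and do not match the real AR_BTCOEX_CTRL layout:

#include <stdint.h>
#include <stdio.h>

/* SM()-style helper: shift a field value into place and mask it. */
#define FIELD(val, shift, mask)	(((uint32_t)(val) << (shift)) & (mask))

int main(void)
{
	uint32_t regval = 0;

	regval |= FIELD(1, 0, 0x00000001);	/* e.g. AR9462 coex mode   */
	regval |= FIELD(1, 1, 0x00000002);	/* e.g. WB timer enable    */
	regval |= FIELD(2, 4, 0x000000f0);	/* e.g. number of antennas */
	regval |= FIELD(3, 8, 0x00000f00);	/* e.g. RX chain mask      */

	printf("BTCOEX_CTRL = 0x%08x\n", regval);
	return 0;
}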
MCI_STATE_INIT_GPM_OFFSET, NULL); + REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, + (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | + SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM))); + + REG_CLR_BIT(ah, AR_MCI_TX_CTRL, + AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); + + if (AR_SREV_9462_20_OR_LATER(ah)) + ar9003_mci_observation_set_up(ah); + + mci->ready = true; + ar9003_mci_prep_interface(ah); + + if (en_int) + ar9003_mci_enable_interrupt(ah); +} + +void ar9003_mci_mute_bt(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!ATH9K_HW_CAP_MCI) + return; + + /* disable all MCI messages */ + REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff); + REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); + + /* wait pending HW messages to flush out */ + udelay(10); + + /* + * Send LNA_TAKE and SYS_SLEEPING when + * 1. reset not after resuming from full sleep + * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment + */ + + ath_dbg(common, MCI, "MCI Send LNA take\n"); + ar9003_mci_send_lna_take(ah, true); + + udelay(5); + + ath_dbg(common, MCI, "MCI Send sys sleeping\n"); + ar9003_mci_send_sys_sleeping(ah, true); +} + +void ar9003_mci_sync_bt_state(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 cur_bt_state; + + if (!ATH9K_HW_CAP_MCI) + return; + + cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL); + + if (mci->bt_state != cur_bt_state) { + ath_dbg(common, MCI, + "MCI BT state mismatches. old: %d, new: %d\n", + mci->bt_state, cur_bt_state); + mci->bt_state = cur_bt_state; + } + + if (mci->bt_state != MCI_BT_SLEEP) { + + ar9003_mci_send_coex_version_query(ah, true); + ar9003_mci_send_coex_wlan_channels(ah, true); + + if (mci->unhalt_bt_gpm == true) { + ath_dbg(common, MCI, "MCI unhalt BT GPM\n"); + ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); + } + } +} + +static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 new_flags, to_set, to_clear; + + if (AR_SREV_9462_20(ah) && + mci->update_2g5g && + (mci->bt_state != MCI_BT_SLEEP)) { + + if (mci->is_2g) { + new_flags = MCI_2G_FLAGS; + to_clear = MCI_2G_FLAGS_CLEAR_MASK; + to_set = MCI_2G_FLAGS_SET_MASK; + } else { + new_flags = MCI_5G_FLAGS; + to_clear = MCI_5G_FLAGS_CLEAR_MASK; + to_set = MCI_5G_FLAGS_SET_MASK; + } + + ath_dbg(common, MCI, + "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n", + mci->is_2g ? 
"2G" : "5G", new_flags, to_clear, to_set); + + if (to_clear) + ar9003_mci_send_coex_bt_flags(ah, wait_done, + MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear); + + if (to_set) + ar9003_mci_send_coex_bt_flags(ah, wait_done, + MCI_GPM_COEX_BT_FLAGS_SET, to_set); + } + + if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP)) + mci->update_2g5g = false; +} + +static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, + u32 *payload, bool queue) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u8 type, opcode; + + if (queue) { + + if (payload) + ath_dbg(common, MCI, + "MCI ERROR: Send fail: %02x: %02x %02x %02x\n", + header, + *(((u8 *)payload) + 4), + *(((u8 *)payload) + 5), + *(((u8 *)payload) + 6)); + else + ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n", + header); + } + + /* check if the message is to be queued */ + if (header != MCI_GPM) + return; + + type = MCI_GPM_TYPE(payload); + opcode = MCI_GPM_OPCODE(payload); + + if (type != MCI_GPM_COEX_AGENT) + return; + + switch (opcode) { + case MCI_GPM_COEX_BT_UPDATE_FLAGS: + + if (AR_SREV_9462_10(ah)) + break; + + if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) == + MCI_GPM_COEX_BT_FLAGS_READ) + break; + + mci->update_2g5g = queue; + + if (queue) + ath_dbg(common, MCI, + "MCI BT_MCI_FLAGS: 2G5G status <queued> %s\n", + mci->is_2g ? "2G" : "5G"); + else + ath_dbg(common, MCI, + "MCI BT_MCI_FLAGS: 2G5G status <sent> %s\n", + mci->is_2g ? "2G" : "5G"); + + break; + + case MCI_GPM_COEX_WLAN_CHANNELS: + + mci->wlan_channels_update = queue; + if (queue) + ath_dbg(common, MCI, "MCI WLAN channel map <queued>\n"); + else + ath_dbg(common, MCI, "MCI WLAN channel map <sent>\n"); + break; + + case MCI_GPM_COEX_HALT_BT_GPM: + + if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) == + MCI_GPM_COEX_BT_GPM_UNHALT) { + + mci->unhalt_bt_gpm = queue; + + if (queue) + ath_dbg(common, MCI, + "MCI UNHALT BT GPM <queued>\n"); + else { + mci->halted_bt_gpm = false; + ath_dbg(common, MCI, + "MCI UNHALT BT GPM <sent>\n"); + } + } + + if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) == + MCI_GPM_COEX_BT_GPM_HALT) { + + mci->halted_bt_gpm = !queue; + + if (queue) + ath_dbg(common, MCI, + "MCI HALT BT GPM <not sent>\n"); + else + ath_dbg(common, MCI, + "MCI UNHALT BT GPM <sent>\n"); + } + + break; + default: + break; + } +} + +void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + + if (!ATH9K_HW_CAP_MCI) + return; + + if (mci->update_2g5g) { + if (mci->is_2g) { + + ar9003_mci_send_2g5g_status(ah, true); + ath_dbg(common, MCI, "MCI Send LNA trans\n"); + ar9003_mci_send_lna_transfer(ah, true); + udelay(5); + + REG_CLR_BIT(ah, AR_MCI_TX_CTRL, + AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); + + if (AR_SREV_9462_20(ah)) { + REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, + AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); + if (!(mci->config & + ATH_MCI_CONFIG_DISABLE_OSLA)) { + REG_SET_BIT(ah, AR_BTCOEX_CTRL, + AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + } + } + } else { + ath_dbg(common, MCI, "MCI Send LNA take\n"); + ar9003_mci_send_lna_take(ah, true); + udelay(5); + + REG_SET_BIT(ah, AR_MCI_TX_CTRL, + AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); + + if (AR_SREV_9462_20(ah)) { + REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, + AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); + REG_CLR_BIT(ah, AR_BTCOEX_CTRL, + AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + } + + ar9003_mci_send_2g5g_status(ah, true); + } + } +} + +bool ar9003_mci_send_message(struct ath_hw *ah, u8 
header, u32 flag, + u32 *payload, u8 len, bool wait_done, + bool check_bt) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + bool msg_sent = false; + u32 regval; + u32 saved_mci_int_en; + int i; + + if (!ATH9K_HW_CAP_MCI) + return false; + + saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN); + regval = REG_READ(ah, AR_BTCOEX_CTRL); + + if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) { + + ath_dbg(common, MCI, + "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n", + header, + (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0); + + ar9003_mci_queue_unsent_gpm(ah, header, payload, true); + return false; + + } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) { + + ath_dbg(common, MCI, + "MCI Don't send message 0x%x. BT is in sleep state\n", + header); + + ar9003_mci_queue_unsent_gpm(ah, header, payload, true); + return false; + } + + if (wait_done) + REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); + + /* Need to clear SW_MSG_DONE raw bit before wait */ + + REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, + (AR_MCI_INTERRUPT_SW_MSG_DONE | + AR_MCI_INTERRUPT_MSG_FAIL_MASK)); + + if (payload) { + for (i = 0; (i * 4) < len; i++) + REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4), + *(payload + i)); + } + + REG_WRITE(ah, AR_MCI_COMMAND0, + (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP), + AR_MCI_COMMAND0_DISABLE_TIMESTAMP) | + SM(len, AR_MCI_COMMAND0_LEN) | + SM(header, AR_MCI_COMMAND0_HEADER))); + + if (wait_done && + !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW, + AR_MCI_INTERRUPT_SW_MSG_DONE, 500))) + ar9003_mci_queue_unsent_gpm(ah, header, payload, true); + else { + ar9003_mci_queue_unsent_gpm(ah, header, payload, false); + msg_sent = true; + } + + if (wait_done) + REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en); + + return msg_sent; +} +EXPORT_SYMBOL(ar9003_mci_send_message); + +void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, + u16 len, u32 sched_addr) +{ + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr)); + + if (!ATH9K_HW_CAP_MCI) + return; + + mci->gpm_addr = gpm_addr; + mci->gpm_buf = gpm_buf; + mci->gpm_len = len; + mci->sched_addr = sched_addr; + mci->sched_buf = sched_buf; + + ar9003_mci_reset(ah, true, true, true); +} +EXPORT_SYMBOL(ar9003_mci_setup); + +void ar9003_mci_cleanup(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!ATH9K_HW_CAP_MCI) + return; + + /* Turn off MCI and Jupiter mode. 
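ar9003_mci_send_message() above copies the payload into the TX payload registers one 32-bit word at a time, kicks AR_MCI_COMMAND0 with the length and header, and waits for the software-message-done interrupt; anything that cannot be sent is queued so it can be replayed later. A simplified sketch of that control flow with stubbed register writes (the COMMAND0 packing here is not the real field layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for register access and the wait helper shown earlier. */
static void write_payload_word(int i, uint32_t w) { printf("TX_PAYLOAD%d=0x%08x\n", i, w); }
static void write_command0(uint32_t v)            { printf("COMMAND0=0x%08x\n", v); }
static bool wait_sw_msg_done(int us)              { (void)us; return true; }
static void queue_unsent_gpm(uint8_t header)      { printf("queued header 0x%02x\n", header); }

static bool send_message(uint8_t header, const uint32_t *payload, uint8_t len,
			 bool mci_enabled)
{
	int i;

	if (!mci_enabled) {
		/* MCI off or BT asleep: remember the message for later. */
		queue_unsent_gpm(header);
		return false;
	}

	for (i = 0; payload && i * 4 < len; i++)
		write_payload_word(i, payload[i]);

	write_command0((uint32_t)len << 8 | header);	/* simplified packing */

	if (!wait_sw_msg_done(500)) {
		queue_unsent_gpm(header);
		return false;
	}
	return true;
}

int main(void)
{
	const uint32_t gpm[4] = { 0x01020304, 0, 0, 0 };

	printf("sent = %d\n", send_message(0x20, gpm, 16, true));
	return 0;
}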
*/ + REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00); + ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n"); + ar9003_mci_disable_interrupt(ah); +} +EXPORT_SYMBOL(ar9003_mci_cleanup); + +static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type, + u8 gpm_opcode, u32 *p_gpm) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u8 *p_data = (u8 *) p_gpm; + + if (gpm_type != MCI_GPM_COEX_AGENT) + return; + + switch (gpm_opcode) { + case MCI_GPM_COEX_VERSION_QUERY: + ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n"); + ar9003_mci_send_coex_version_response(ah, true); + break; + case MCI_GPM_COEX_VERSION_RESPONSE: + ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n"); + mci->bt_ver_major = + *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION); + mci->bt_ver_minor = + *(p_data + MCI_GPM_COEX_B_MINOR_VERSION); + mci->bt_version_known = true; + ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n", + mci->bt_ver_major, mci->bt_ver_minor); + break; + case MCI_GPM_COEX_STATUS_QUERY: + ath_dbg(common, MCI, + "MCI Recv GPM COEX Status Query = 0x%02X\n", + *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP)); + mci->wlan_channels_update = true; + ar9003_mci_send_coex_wlan_channels(ah, true); + break; + case MCI_GPM_COEX_BT_PROFILE_INFO: + mci->query_bt = true; + ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n"); + break; + case MCI_GPM_COEX_BT_STATUS_UPDATE: + mci->query_bt = true; + ath_dbg(common, MCI, + "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n", + *(p_gpm + 3)); + break; + default: + break; + } +} + +u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, + u8 gpm_opcode, int time_out) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 *p_gpm = NULL, mismatch = 0, more_data; + u32 offset; + u8 recv_type = 0, recv_opcode = 0; + bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE); + + if (!ATH9K_HW_CAP_MCI) + return 0; + + more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE; + + while (time_out > 0) { + if (p_gpm) { + MCI_GPM_RECYCLE(p_gpm); + p_gpm = NULL; + } + + if (more_data != MCI_GPM_MORE) + time_out = ar9003_mci_wait_for_interrupt(ah, + AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_GPM, + time_out); + + if (!time_out) + break; + + offset = ar9003_mci_state(ah, + MCI_STATE_NEXT_GPM_OFFSET, &more_data); + + if (offset == MCI_GPM_INVALID) + continue; + + p_gpm = (u32 *) (mci->gpm_buf + offset); + recv_type = MCI_GPM_TYPE(p_gpm); + recv_opcode = MCI_GPM_OPCODE(p_gpm); + + if (MCI_GPM_IS_CAL_TYPE(recv_type)) { + + if (recv_type == gpm_type) { + + if ((gpm_type == MCI_GPM_BT_CAL_DONE) && + !b_is_bt_cal_done) { + gpm_type = MCI_GPM_BT_CAL_GRANT; + ath_dbg(common, MCI, + "MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n"); + continue; + } + + break; + } + } else if ((recv_type == gpm_type) && + (recv_opcode == gpm_opcode)) + break; + + /* not expected message */ + + /* + * check if it's cal_grant + * + * When we're waiting for cal_grant in reset routine, + * it's possible that BT sends out cal_request at the + * same time. Since BT's calibration doesn't happen + * that often, we'll let BT completes calibration then + * we continue to wait for cal_grant from BT. + * Orginal: Wait BT_CAL_GRANT. + * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait + * BT_CAL_DONE -> Wait BT_CAL_GRANT. 
+ */ + + if ((gpm_type == MCI_GPM_BT_CAL_GRANT) && + (recv_type == MCI_GPM_BT_CAL_REQ)) { + + u32 payload[4] = {0, 0, 0, 0}; + + gpm_type = MCI_GPM_BT_CAL_DONE; + ath_dbg(common, MCI, + "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n"); + + MCI_GPM_SET_CAL_TYPE(payload, + MCI_GPM_WLAN_CAL_GRANT); + + ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, + false, false); + + ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n"); + + continue; + } else { + ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n", + *(p_gpm + 1)); + mismatch++; + ar9003_mci_process_gpm_extra(ah, recv_type, + recv_opcode, p_gpm); + } + } + if (p_gpm) { + MCI_GPM_RECYCLE(p_gpm); + p_gpm = NULL; + } + + if (time_out <= 0) { + time_out = 0; + ath_dbg(common, MCI, + "MCI GPM received timeout, mismatch = %d\n", mismatch); + } else + ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n", + gpm_type, gpm_opcode); + + while (more_data == MCI_GPM_MORE) { + + ath_dbg(common, MCI, "MCI discard remaining GPM\n"); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); + + if (offset == MCI_GPM_INVALID) + break; + + p_gpm = (u32 *) (mci->gpm_buf + offset); + recv_type = MCI_GPM_TYPE(p_gpm); + recv_opcode = MCI_GPM_OPCODE(p_gpm); + + if (!MCI_GPM_IS_CAL_TYPE(recv_type)) + ar9003_mci_process_gpm_extra(ah, recv_type, + recv_opcode, p_gpm); + + MCI_GPM_RECYCLE(p_gpm); + } + + return time_out; +} + +u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + u32 value = 0, more_gpm = 0, gpm_ptr; + u8 query_type; + + if (!ATH9K_HW_CAP_MCI) + return 0; + + switch (state_type) { + case MCI_STATE_ENABLE: + if (mci->ready) { + + value = REG_READ(ah, AR_BTCOEX_CTRL); + + if ((value == 0xdeadbeef) || (value == 0xffffffff)) + value = 0; + } + value &= AR_BTCOEX_CTRL_MCI_MODE_EN; + break; + case MCI_STATE_INIT_GPM_OFFSET: + value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value); + mci->gpm_idx = value; + break; + case MCI_STATE_NEXT_GPM_OFFSET: + case MCI_STATE_LAST_GPM_OFFSET: + /* + * This could be useful to avoid new GPM message interrupt which + * may lead to spurious interrupt after power sleep, or multiple + * entry of ath_mci_intr(). 
+ * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can + * alleviate this effect, but clearing GPM RX interrupt bit is + * safe, because whether this is called from hw or driver code + * there must be an interrupt bit set/triggered initially + */ + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_GPM); + + gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + value = gpm_ptr; + + if (value == 0) + value = mci->gpm_len - 1; + else if (value >= mci->gpm_len) { + if (value != 0xFFFF) { + value = 0; + ath_dbg(common, MCI, + "MCI GPM offset out of range\n"); + } + } else + value--; + + if (value == 0xFFFF) { + value = MCI_GPM_INVALID; + more_gpm = MCI_GPM_NOMORE; + ath_dbg(common, MCI, + "MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n", + gpm_ptr, value); + } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) { + + if (gpm_ptr == mci->gpm_idx) { + value = MCI_GPM_INVALID; + more_gpm = MCI_GPM_NOMORE; + + ath_dbg(common, MCI, + "MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n", + gpm_ptr, value); + } else { + for (;;) { + + u32 temp_index; + + /* skip reserved GPM if any */ + + if (value != mci->gpm_idx) + more_gpm = MCI_GPM_MORE; + else + more_gpm = MCI_GPM_NOMORE; + + temp_index = mci->gpm_idx; + mci->gpm_idx++; + + if (mci->gpm_idx >= + mci->gpm_len) + mci->gpm_idx = 0; + + ath_dbg(common, MCI, + "MCI GPM message got ptr=%d, @offset=%d, more=%d\n", + gpm_ptr, temp_index, + (more_gpm == MCI_GPM_MORE)); + + if (ar9003_mci_is_gpm_valid(ah, + temp_index)) { + value = temp_index; + break; + } + + if (more_gpm == MCI_GPM_NOMORE) { + value = MCI_GPM_INVALID; + break; + } + } + } + if (p_data) + *p_data = more_gpm; + } + + if (value != MCI_GPM_INVALID) + value <<= 4; + + break; + case MCI_STATE_LAST_SCHD_MSG_OFFSET: + value = MS(REG_READ(ah, AR_MCI_RX_STATUS), + AR_MCI_RX_LAST_SCHD_MSG_INDEX); + /* Make it in bytes */ + value <<= 4; + break; + + case MCI_STATE_REMOTE_SLEEP: + value = MS(REG_READ(ah, AR_MCI_RX_STATUS), + AR_MCI_RX_REMOTE_SLEEP) ? 
+ MCI_BT_SLEEP : MCI_BT_AWAKE; + break; + + case MCI_STATE_CONT_RSSI_POWER: + value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER); + break; + + case MCI_STATE_CONT_PRIORITY: + value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY); + break; + + case MCI_STATE_CONT_TXRX: + value = MS(mci->cont_status, AR_MCI_CONT_TXRX); + break; + + case MCI_STATE_BT: + value = mci->bt_state; + break; + + case MCI_STATE_SET_BT_SLEEP: + mci->bt_state = MCI_BT_SLEEP; + break; + + case MCI_STATE_SET_BT_AWAKE: + mci->bt_state = MCI_BT_AWAKE; + ar9003_mci_send_coex_version_query(ah, true); + ar9003_mci_send_coex_wlan_channels(ah, true); + + if (mci->unhalt_bt_gpm) { + + ath_dbg(common, MCI, "MCI unhalt BT GPM\n"); + ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); + } + + ar9003_mci_2g5g_switch(ah, true); + break; + + case MCI_STATE_SET_BT_CAL_START: + mci->bt_state = MCI_BT_CAL_START; + break; + + case MCI_STATE_SET_BT_CAL: + mci->bt_state = MCI_BT_CAL; + break; + + case MCI_STATE_RESET_REQ_WAKE: + ar9003_mci_reset_req_wakeup(ah); + mci->update_2g5g = true; + + if ((AR_SREV_9462_20_OR_LATER(ah)) && + (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) { + /* Check if we still have control of the GPIOs */ + if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) & + ATH_MCI_CONFIG_MCI_OBS_GPIO) != + ATH_MCI_CONFIG_MCI_OBS_GPIO) { + + ath_dbg(common, MCI, + "MCI reconfigure observation\n"); + ar9003_mci_observation_set_up(ah); + } + } + break; + + case MCI_STATE_SEND_WLAN_COEX_VERSION: + ar9003_mci_send_coex_version_response(ah, true); + break; + + case MCI_STATE_SET_BT_COEX_VERSION: + + if (!p_data) + ath_dbg(common, MCI, + "MCI Set BT Coex version with NULL data!!\n"); + else { + mci->bt_ver_major = (*p_data >> 8) & 0xff; + mci->bt_ver_minor = (*p_data) & 0xff; + mci->bt_version_known = true; + ath_dbg(common, MCI, "MCI BT version set: %d.%d\n", + mci->bt_ver_major, mci->bt_ver_minor); + } + break; + + case MCI_STATE_SEND_WLAN_CHANNELS: + if (p_data) { + if (((mci->wlan_channels[1] & 0xffff0000) == + (*(p_data + 1) & 0xffff0000)) && + (mci->wlan_channels[2] == *(p_data + 2)) && + (mci->wlan_channels[3] == *(p_data + 3))) + break; + + mci->wlan_channels[0] = *p_data++; + mci->wlan_channels[1] = *p_data++; + mci->wlan_channels[2] = *p_data++; + mci->wlan_channels[3] = *p_data++; + } + mci->wlan_channels_update = true; + ar9003_mci_send_coex_wlan_channels(ah, true); + break; + + case MCI_STATE_SEND_VERSION_QUERY: + ar9003_mci_send_coex_version_query(ah, true); + break; + + case MCI_STATE_SEND_STATUS_QUERY: + query_type = (AR_SREV_9462_10(ah)) ? + MCI_GPM_COEX_QUERY_BT_ALL_INFO : + MCI_GPM_COEX_QUERY_BT_TOPOLOGY; + + ar9003_mci_send_coex_bt_status_query(ah, true, query_type); + break; + + case MCI_STATE_NEED_FLUSH_BT_INFO: + /* + * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT + * message needs to be sent. It is set whenever there is a + * request to send a HALT message. + * mci_halted_bt_gpm indicates whether the HALT message was + * sent out successfully. + * + * Checking (mci_unhalt_bt_gpm == false) instead of + * checking (ah->mci_halted_bt_gpm == false) makes sure + * we are currently in UNHALT-ed mode and BT can + * respond to the status query. + */ + value = (!mci->unhalt_bt_gpm && + mci->need_flush_btinfo) ? 1 : 0; + if (p_data) + mci->need_flush_btinfo = + (*p_data != 0) ? 
true : false; + break; + + case MCI_STATE_RECOVER_RX: + + ath_dbg(common, MCI, "MCI hw RECOVER_RX\n"); + ar9003_mci_prep_interface(ah); + mci->query_bt = true; + mci->need_flush_btinfo = true; + ar9003_mci_send_coex_wlan_channels(ah, true); + ar9003_mci_2g5g_switch(ah, true); + break; + + case MCI_STATE_NEED_FTP_STOMP: + value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); + break; + + case MCI_STATE_NEED_TUNING: + value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING); + break; + + default: + break; + + } + + return value; +} +EXPORT_SYMBOL(ar9003_mci_state); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h new file mode 100644 index 00000000000..798da116a44 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef AR9003_MCI_H +#define AR9003_MCI_H + +#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */ + +/* Default remote BT device MCI COEX version */ +#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3 +#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0 + +/* Local WLAN MCI COEX version */ +#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3 +#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0 + +enum mci_gpm_coex_query_type { + MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0), + MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1), + MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2), +}; + +enum mci_gpm_coex_halt_bt_gpm { + MCI_GPM_COEX_BT_GPM_UNHALT, + MCI_GPM_COEX_BT_GPM_HALT +}; + +enum mci_gpm_coex_bt_update_flags_op { + MCI_GPM_COEX_BT_FLAGS_READ, + MCI_GPM_COEX_BT_FLAGS_SET, + MCI_GPM_COEX_BT_FLAGS_CLEAR +}; + +#define MCI_NUM_BT_CHANNELS 79 + +#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002 +#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004 +#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008 +#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010 +#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020 +#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040 +#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080 +#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100 +#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200 +#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400 +#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000 +#define MCI_BT_MCI_FLAGS_OTHER 0x00010000 + +#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde + +#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \ + MCI_BT_MCI_FLAGS_UPDATE_HDR | \ + MCI_BT_MCI_FLAGS_UPDATE_PLD | \ + MCI_BT_MCI_FLAGS_MCI_MODE) + +#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000 +#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS +#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS + +#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS +#define MCI_5G_FLAGS_SET_MASK 0x00000000 +#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \ + ~MCI_TOGGLE_BT_MCI_FLAGS) + +/* + * 
Default value for AR9462 is 0x00002201 + */ +#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003 +#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004 +#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008 +#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010 +#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020 +#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040 +#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080 +#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700 +#define ATH_MCI_CONFIG_AGGR_THRESH_S 8 +#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800 +#define ATH_MCI_CONFIG_CLK_DIV 0x00003000 +#define ATH_MCI_CONFIG_CLK_DIV_S 12 +#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000 +#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000 +#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000 + +#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \ + ATH_MCI_CONFIG_MCI_OBS_TXRX | \ + ATH_MCI_CONFIG_MCI_OBS_BT) +#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F + +#endif diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index a4450cba065..59647a3ceb7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -119,8 +119,8 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah) break; default: delta = 0; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Invalid tx-chainmask: %u\n", ah->txchainmask); + ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n", + ah->txchainmask); } power += delta; @@ -148,13 +148,12 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) else training_power = ar9003_get_training_power_5g(ah); - ath_dbg(common, ATH_DBG_CALIBRATE, - "Training power: %d, Target power: %d\n", + ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n", training_power, ah->paprd_target_power); if (training_power < 0) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "PAPRD target power delta out of range"); + ath_dbg(common, CALIBRATE, + "PAPRD target power delta out of range\n"); return -ERANGE; } ah->paprd_training_power = training_power; @@ -311,8 +310,8 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain, reg_cl_gain = AR_PHY_CL_TAB_2; break; default: - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, - "Invalid chainmask: %d\n", chain); + ath_dbg(ath9k_hw_common(ah), CALIBRATE, + "Invalid chainmask: %d\n", chain); break; } @@ -850,7 +849,7 @@ bool ar9003_paprd_is_done(struct ath_hw *ah) agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR); - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + ath_dbg(ath9k_hw_common(ah), CALIBRATE, "AGC2_PWR = 0x%x training done = 0x%x\n", agc2_pwr, paprd_done); /* diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 2330e7ede19..2589b38b689 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -199,12 +199,14 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, synth_freq = chan->channel; } } else { - range = 10; + range = AR_SREV_9462(ah) ? 
5 : 10; max_spur_cnts = 4; synth_freq = chan->channel; } for (i = 0; i < max_spur_cnts; i++) { + if (AR_SREV_9462(ah) && (i == 0 || i == 3)) + continue; negative = 0; if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], @@ -880,7 +882,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: ofdm weak signal: %s=>%s\n", chan->channel, !aniState->ofdmWeakSigDetectOff ? @@ -898,7 +900,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(firstep_table)) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep_table)); return false; @@ -935,7 +937,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2); if (level != aniState->firstepLevel) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -943,7 +945,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, ATH9K_ANI_FIRSTEP_LVL_NEW, value, aniState->iniDef.firstep); - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -963,7 +965,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1_table)) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1_table)); return false; @@ -999,7 +1001,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_EXT_CYCPWR_THR1, value2); if (level != aniState->spurImmunityLevel) { - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1007,7 +1009,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, value, aniState->iniDef.cycpwrThr1); - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1034,8 +1036,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_MUX_REG, is_on); if (!is_on != aniState->mrcCCKOff) { - ath_dbg(common, ATH_DBG_ANI, - "** ch %d: MRC CCK: %s=>%s\n", + ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n", chan->channel, !aniState->mrcCCKOff ? "on" : "off", is_on ? "on" : "off"); @@ -1050,11 +1051,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, case ATH9K_ANI_PRESENT: break; default: - ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); + ath_dbg(common, ANI, "invalid cmd %u\n", cmd); return false; } - ath_dbg(common, ATH_DBG_ANI, + ath_dbg(common, ANI, "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff ? 
"on" : "off", @@ -1123,8 +1124,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) aniState = &ah->curchan->ani; iniDef = &aniState->iniDef; - ath_dbg(common, ATH_DBG_ANI, - "ver %d.%d opmode %u chan %d Mhz/0x%x\n", + ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", ah->hw_version.macVersion, ah->hw_version.macRev, ah->opmode, @@ -1386,7 +1386,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah) ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE | AR_PHY_WATCHDOG_IDLE_ENABLE)); - ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n"); + ath_dbg(common, RESET, "Disabled BB Watchdog\n"); return; } @@ -1422,8 +1422,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah) AR_PHY_WATCHDOG_IDLE_MASK | (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2))); - ath_dbg(common, ATH_DBG_RESET, - "Enabled BB Watchdog timeout (%u ms)\n", + ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n", idle_tmo_ms); } @@ -1452,9 +1451,9 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) return; status = ah->bb_watchdog_last_status; - ath_dbg(common, ATH_DBG_RESET, + ath_dbg(common, RESET, "\n==== BB update: BB status=0x%08x ====\n", status); - ath_dbg(common, ATH_DBG_RESET, + ath_dbg(common, RESET, "** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n", MS(status, AR_PHY_WATCHDOG_INFO), MS(status, AR_PHY_WATCHDOG_DET_HANG), @@ -1466,22 +1465,19 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) MS(status, AR_PHY_WATCHDOG_AGC_SM), MS(status, AR_PHY_WATCHDOG_SRCH_SM)); - ath_dbg(common, ATH_DBG_RESET, - "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n", + ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n", REG_READ(ah, AR_PHY_WATCHDOG_CTL_1), REG_READ(ah, AR_PHY_WATCHDOG_CTL_2)); - ath_dbg(common, ATH_DBG_RESET, - "** BB mode: BB_gen_controls=0x%08x **\n", + ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n", REG_READ(ah, AR_PHY_GEN_CTRL)); #define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles) if (common->cc_survey.cycles) - ath_dbg(common, ATH_DBG_RESET, + ath_dbg(common, RESET, "** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n", PCT(rx_busy), PCT(rx_frame), PCT(tx_frame)); - ath_dbg(common, ATH_DBG_RESET, - "==== BB update: done ====\n\n"); + ath_dbg(common, RESET, "==== BB update: done ====\n\n"); } EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 4114fe752c6..ed64114571f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -389,6 +389,8 @@ #define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10 #define AR_PHY_RIFS_INIT_DELAY 0x3ff0000 +#define AR_PHY_AGC_QUICK_DROP 0x03c00000 +#define AR_PHY_AGC_QUICK_DROP_S 22 #define AR_PHY_AGC_COARSE_LOW 0x00007F80 #define AR_PHY_AGC_COARSE_LOW_S 7 #define AR_PHY_AGC_COARSE_HIGH 0x003F8000 @@ -488,6 +490,8 @@ #define AR_PHY_TEST_CTL_TSTADC_EN_S 8 #define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00 #define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10 +#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000 +#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29 #define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) @@ -999,6 +1003,7 @@ /* GLB Registers */ #define AR_GLB_BASE 0x20000 +#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE) #define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44) #define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \ (AR_SREV_9462_20(_ah) ? 
0x4c : 0x50)) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c index 48803ee9c0d..458bedf0b0a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c @@ -16,6 +16,7 @@ #include "hw.h" #include "ar9003_phy.h" +#include "ar9003_rtt.h" #define RTT_RESTORE_TIMEOUT 1000 #define RTT_ACCESS_TIMEOUT 100 diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 9c51b395b4f..dc2054f0378 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = { static const u32 ar9462_2p0_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011}, - {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e}, - {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, - {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d}, + {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, + {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, + {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81}, {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, - {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, - {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, - {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, + {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, + {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782}, - {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, + {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0}, @@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000}, + {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa}, + {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00}, + {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, + {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, + {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, + {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, + {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, + {0x0000a430, 
0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, @@ -688,8 +697,8 @@ static const u32 ar9462_2p0_mac_postamble_emulation[][5] = { static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808}, - {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, - {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, + {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, + {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, }; static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = { @@ -717,8 +726,8 @@ static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = { static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808}, - {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, - {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, + {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, + {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, }; static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = { @@ -1059,7 +1068,7 @@ static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = { static const u32 ar9462_2p0_soc_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233}, + {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033}, }; static const u32 ar9462_2p0_baseband_core[][2] = { @@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = { {0x00009e30, 0x06336f77}, {0x00009e34, 0x6af6532f}, {0x00009e38, 0x0cc80c00}, - {0x00009e40, 0x0d261820}, + {0x00009e40, 0x15262820}, {0x00009e4c, 0x00001004}, {0x00009e50, 0x00ff03f1}, - {0x00009e54, 0xe4c355c7}, - {0x00009e58, 0xfd897735}, + {0x00009e54, 0xe4c555c2}, + {0x00009e58, 0xfd857722}, {0x00009e5c, 0xe9198724}, {0x00009fc0, 0x803e4788}, {0x00009fc4, 0x0001efb5}, @@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = { {0x0000a398, 0x001f0e0f}, {0x0000a39c, 0x0075393f}, {0x0000a3a0, 0xb79f6427}, - {0x0000a3a4, 0x00000000}, - {0x0000a3a8, 0xaaaaaaaa}, - {0x0000a3ac, 0x3c466478}, {0x0000a3c0, 0x20202020}, {0x0000a3c4, 0x22222220}, {0x0000a3c8, 0x20200020}, @@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = { {0x0000a40c, 0x00820820}, {0x0000a414, 0x1ce739ce}, {0x0000a418, 0x2d001dce}, - {0x0000a41c, 0x1ce739ce}, - {0x0000a420, 0x000001ce}, - {0x0000a424, 0x1ce739ce}, - {0x0000a428, 0x000001ce}, - {0x0000a42c, 0x1ce739ce}, - {0x0000a430, 0x1ce739ce}, {0x0000a434, 0x00000000}, {0x0000a438, 0x00001801}, {0x0000a43c, 0x00100000}, @@ -1257,8 +1257,8 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = { {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, - {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, + {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84}, {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 
0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, @@ -1850,8 +1850,8 @@ static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = { {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, - {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, + {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84}, {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 1c269f50822..b30e9fc6433 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -25,6 +25,7 @@ #include "debug.h" #include "common.h" +#include "mci.h" /* * Header for the ath9k.ko driver core *only* -- hw code nor any other driver @@ -96,7 +97,7 @@ enum buffer_type { #define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) #define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR) -#define ATH_TXSTATUS_RING_SIZE 64 +#define ATH_TXSTATUS_RING_SIZE 512 #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) @@ -158,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, /* return block-ack bitmap index given sequence and starting sequence */ #define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1)) +/* return the seqno for _start + _offset */ +#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1)) + /* returns delimiter padding required given the packet length */ #define ATH_AGGR_GET_NDELIM(_len) \ (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \ @@ -192,6 +196,7 @@ struct ath_txq { u8 txq_headidx; u8 txq_tailidx; int pending_frames; + struct sk_buff_head complete_q; }; struct ath_atx_ac { @@ -237,6 +242,7 @@ struct ath_atx_tid { struct ath_node *an; struct ath_atx_ac *ac; unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; + int bar_index; u16 seq_start; u16 seq_next; u16 baw_size; @@ -251,8 +257,9 @@ struct ath_atx_tid { struct ath_node { #ifdef CONFIG_ATH9K_DEBUGFS struct list_head list; /* for sc->nodes */ - struct ieee80211_sta *sta; /* station struct we're part of */ #endif + struct ieee80211_sta *sta; /* station struct we're part of */ + struct ieee80211_vif *vif; /* interface with which we're associated */ struct ath_atx_tid tid[WME_NUM_TID]; struct ath_atx_ac ac[WME_NUM_AC]; int ps_key; @@ -274,7 +281,6 @@ struct ath_tx_control { }; #define ATH_TX_ERROR 0x01 -#define ATH_TX_BAR 0x02 /** * @txq_map: Index is mac80211 queue number. 
This is @@ -443,7 +449,9 @@ struct ath_btcoex { u32 btcoex_no_stomp; /* in usec */ u32 btcoex_period; /* in usec */ u32 btscan_no_stomp; /* in usec */ + u32 duty_cycle; struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ + struct ath_mci_profile mci; }; int ath_init_btcoex_timer(struct ath_softc *sc); @@ -458,7 +466,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc); #define ATH_LED_PIN_9287 8 #define ATH_LED_PIN_9300 10 #define ATH_LED_PIN_9485 6 -#define ATH_LED_PIN_9462 0 +#define ATH_LED_PIN_9462 4 #ifdef CONFIG_MAC80211_LEDS void ath_init_leds(struct ath_softc *sc); @@ -538,7 +546,7 @@ struct ath_ant_comb { #define DEFAULT_CACHELINE 32 #define ATH_REGCLASSIDS_MAX 10 #define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ -#define ATH_MAX_SW_RETRIES 10 +#define ATH_MAX_SW_RETRIES 30 #define ATH_CHAN_MAX 255 #define ATH_TXPOWER_MAX 100 /* .5 dBm units */ @@ -643,6 +651,7 @@ struct ath_softc { struct delayed_work tx_complete_work; struct delayed_work hw_pll_work; struct ath_btcoex btcoex; + struct ath_mci_coex mci_coex; struct ath_descdma txsdma; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index a13cabb9543..b8967e482e6 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -117,11 +117,10 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) memset(&txctl, 0, sizeof(struct ath_tx_control)); txctl.txq = sc->beacon.cabq; - ath_dbg(common, ATH_DBG_XMIT, - "transmitting CABQ packet, skb: %p\n", skb); + ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb); if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n"); + ath_dbg(common, XMIT, "CABQ TX failed\n"); dev_kfree_skb_any(skb); } } @@ -204,7 +203,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, if (skb && cabq_depth) { if (sc->nvifs > 1) { - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, BEACON, "Flushing previous cabq traffic\n"); ath_draintxq(sc, cabq, false); } @@ -297,7 +296,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif) tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF; avp->tsf_adjust = cpu_to_le64(tsfadjust); - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, BEACON, "stagger beacons, bslot %d intval %u tsfadjust %llu\n", avp->av_bslot, intval, (unsigned long long)tsfadjust); @@ -357,6 +356,7 @@ void ath_beacon_tasklet(unsigned long data) struct ath_buf *bf = NULL; struct ieee80211_vif *vif; struct ath_tx_status ts; + bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int slot; u32 bfaddr, bc = 0; @@ -371,15 +371,14 @@ void ath_beacon_tasklet(unsigned long data) sc->beacon.bmisscnt++; if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) { - ath_dbg(common, ATH_DBG_BSTUCK, + ath_dbg(common, BSTUCK, "missed %u consecutive beacons\n", sc->beacon.bmisscnt); ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq); if (sc->beacon.bmisscnt > 3) ath9k_hw_bstuck_nfcal(ah); } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { - ath_dbg(common, ATH_DBG_BSTUCK, - "beacon is officially stuck\n"); + ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); sc->sc_flags |= SC_OP_TSF_RESET; ieee80211_queue_work(sc->hw, &sc->hw_reset_work); } @@ -406,7 +405,7 @@ void ath_beacon_tasklet(unsigned long data) slot = (tsftu % (intval * ATH_BCBUF)) / intval; vif = sc->beacon.bslot[slot]; - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, BEACON, "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", slot, tsf, 
tsftu / ATH_BCBUF, intval, vif); } else { @@ -424,7 +423,7 @@ void ath_beacon_tasklet(unsigned long data) } if (sc->beacon.bmisscnt != 0) { - ath_dbg(common, ATH_DBG_BSTUCK, + ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n", sc->beacon.bmisscnt); sc->beacon.bmisscnt = 0; @@ -458,10 +457,12 @@ void ath_beacon_tasklet(unsigned long data) if (bfaddr != 0) { /* NB: cabq traffic should already be queued and primed */ ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr); - ath9k_hw_txstart(ah, sc->beacon.beaconq); + + if (!edma) + ath9k_hw_txstart(ah, sc->beacon.beaconq); sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */ - if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { + if (edma) { spin_lock_bh(&sc->sc_pcu_lock); ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts); spin_unlock_bh(&sc->sc_pcu_lock); @@ -541,7 +542,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc, /* No need to configure beacon if we are not associated */ if (!common->curaid) { - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, BEACON, "STA is not yet associated..skipping beacon config\n"); return; } @@ -631,8 +632,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc, /* TSF out of range threshold fixed at 1 second */ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; - ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); + ath_dbg(common, BEACON, "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", bs.bs_bmissthreshold, bs.bs_sleepduration, bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); @@ -660,8 +661,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval); nexttbtt = tsf + intval; - ath_dbg(common, ATH_DBG_BEACON, - "IBSS nexttbtt %u intval %u (%u)\n", + ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n", nexttbtt, intval, conf->beacon_interval); /* @@ -699,9 +699,8 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, (sc->nbcnvifs > 1) && (vif->type == NL80211_IFTYPE_AP) && (cur_conf->beacon_interval != bss_conf->beacon_int)) { - ath_dbg(common, ATH_DBG_CONFIG, - "Changing beacon interval of multiple \ - AP interfaces !\n"); + ath_dbg(common, CONFIG, + "Changing beacon interval of multiple AP interfaces !\n"); return false; } /* @@ -710,7 +709,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, */ if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) && (vif->type != NL80211_IFTYPE_AP)) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "STA vif's beacon not allowed on AP mode\n"); return false; } @@ -722,7 +721,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, (vif->type == NL80211_IFTYPE_STATION) && (sc->sc_flags & SC_OP_BEACONS) && !avp->primary_sta_vif) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Beacon already configured for a station interface\n"); return false; } @@ -802,8 +801,7 @@ void ath_set_beacon(struct ath_softc *sc) ath_beacon_config_sta(sc, cur_conf); break; default: - ath_dbg(common, ATH_DBG_CONFIG, - "Unsupported beaconing mode\n"); + ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); return; } diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 012263968d6..a6712a95d76 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -21,7 +21,7 @@ enum ath_bt_mode { ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */ 
ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */ ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */ - ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */ + ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */ }; struct ath_btcoex_config { @@ -36,6 +36,20 @@ struct ath_btcoex_config { bool bt_hold_rx_clear; }; +static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX] + [AR9300_NUM_WLAN_WEIGHTS] = { + { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */ + { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */ + { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */ +}; + +static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX] + [AR9300_NUM_WLAN_WEIGHTS] = { + { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */ + { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */ + { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */ + { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */ +}; void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) { @@ -54,6 +68,9 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) u32 i, idx; bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity; + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + if (AR_SREV_9300_20_OR_LATER(ah)) rxclear_polarity = !ath_bt_config.bt_rxclear_polarity; @@ -85,6 +102,9 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + /* connect bt_active to baseband */ REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL, (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF | @@ -107,6 +127,9 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + /* btcoex 3-wire */ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB | @@ -133,6 +156,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + /* Configure the desired GPIO port for TX_FRAME output */ ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); @@ -144,6 +170,9 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) | SM(wlan_weight, AR_BTCOEX_WL_WGHT); } @@ -152,27 +181,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) { - struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; u32 val; + int i; /* * Program coex mode and weight registers to * enable coex 3-wire */ - REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode); - REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); + REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode); + REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]); - REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]); - 
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]); - + REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]); + REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]); + for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), + btcoex->bt_weight[i]); } else - REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); + REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights); @@ -185,23 +213,39 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); - ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, + ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio, AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); } +static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; + int i; + + for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) + REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), + btcoex->wlan_weight[i]); + + REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); + btcoex->enabled = true; +} + void ath9k_hw_btcoex_enable(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - switch (btcoex_hw->scheme) { + switch (ath9k_hw_get_btcoex_scheme(ah)) { case ATH_BTCOEX_CFG_NONE: - break; + return; case ATH_BTCOEX_CFG_2WIRE: ath9k_hw_btcoex_enable_2wire(ah); break; case ATH_BTCOEX_CFG_3WIRE: ath9k_hw_btcoex_enable_3wire(ah); break; + case ATH_BTCOEX_CFG_MCI: + ath9k_hw_btcoex_enable_mci(ah); + return; } REG_RMW(ah, AR_GPIO_PDPU, @@ -215,7 +259,18 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable); void ath9k_hw_btcoex_disable(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + int i; + + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + btcoex_hw->enabled = false; + if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) { + ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); + for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) + REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), + btcoex_hw->wlan_weight[i]); + } ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, @@ -228,49 +283,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah) if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0); - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0); + for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0); } else REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0); } - - ah->btcoex_hw.enabled = false; } EXPORT_SYMBOL(ath9k_hw_btcoex_disable); static void ar9003_btcoex_bt_stomp(struct ath_hw *ah, enum ath_stomp_type stomp_type) { - ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT; - ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT; - ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT; - ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT; - - - switch (stomp_type) { - case ATH_BTCOEX_STOMP_ALL: - ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0; - ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1; - break; - case ATH_BTCOEX_STOMP_LOW: - ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0; - ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1; - break; - case ATH_BTCOEX_STOMP_NONE: - ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0; - ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1; - break; - - 
default: - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, - "Invalid Stomptype\n"); - break; + struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; + const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] : + ar9462_wlan_weights[stomp_type]; + int i; + + for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) { + btcoex->bt_weight[i] = AR9300_BT_WGHT; + btcoex->wlan_weight[i] = weight[i]; } - - ath9k_hw_btcoex_enable(ah); } /* @@ -279,6 +312,9 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah, void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, enum ath_stomp_type stomp_type) { + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + if (AR_SREV_9300_20_OR_LATER(ah)) { ar9003_btcoex_bt_stomp(ah, stomp_type); return; @@ -298,11 +334,8 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, AR_STOMP_NONE_WLAN_WGHT); break; default: - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, - "Invalid Stomptype\n"); + ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n"); break; } - - ath9k_hw_btcoex_enable(ah); } EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp); diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h index 234f77689b1..278361c867c 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -36,22 +36,57 @@ #define ATH_BT_CNT_THRESHOLD 3 #define ATH_BT_CNT_SCAN_THRESHOLD 15 +#define AR9300_NUM_BT_WEIGHTS 4 +#define AR9300_NUM_WLAN_WEIGHTS 4 /* Defines the BT AR_BT_COEX_WGHT used */ enum ath_stomp_type { - ATH_BTCOEX_NO_STOMP, ATH_BTCOEX_STOMP_ALL, ATH_BTCOEX_STOMP_LOW, - ATH_BTCOEX_STOMP_NONE + ATH_BTCOEX_STOMP_NONE, + ATH_BTCOEX_STOMP_LOW_FTP, + ATH_BTCOEX_STOMP_MAX }; enum ath_btcoex_scheme { ATH_BTCOEX_CFG_NONE, ATH_BTCOEX_CFG_2WIRE, ATH_BTCOEX_CFG_3WIRE, + ATH_BTCOEX_CFG_MCI, +}; + +struct ath9k_hw_mci { + u32 raw_intr; + u32 rx_msg_intr; + u32 cont_status; + u32 gpm_addr; + u32 gpm_len; + u32 gpm_idx; + u32 sched_addr; + u32 wlan_channels[4]; + u32 wlan_cal_seq; + u32 wlan_cal_done; + u32 config; + u8 *gpm_buf; + u8 *sched_buf; + bool ready; + bool update_2g5g; + bool is_2g; + bool query_bt; + bool unhalt_bt_gpm; /* need send UNHALT */ + bool halted_bt_gpm; /* HALT sent */ + bool need_flush_btinfo; + bool bt_version_known; + bool wlan_channels_update; + u8 wlan_ver_major; + u8 wlan_ver_minor; + u8 bt_ver_major; + u8 bt_ver_minor; + u8 bt_state; }; struct ath_btcoex_hw { enum ath_btcoex_scheme scheme; + struct ath9k_hw_mci mci; bool enabled; u8 wlanactive_gpio; u8 btactive_gpio; @@ -59,6 +94,8 @@ struct ath_btcoex_hw { u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ + u32 bt_weight[AR9300_NUM_BT_WEIGHTS]; + u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; }; void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 99538810a31..172e33db7f4 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -116,7 +116,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah, if (h[i].privNF > limit->max) { high_nf_mid = true; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "NFmid[%d] (%d) > MAX (%d), %s\n", i, h[i].privNF, limit->max, (cal->nfcal_interference ? 
@@ -199,8 +199,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) return true; if (currCal->calState != CAL_DONE) { - ath_dbg(common, ATH_DBG_CALIBRATE, - "Calibration state incorrect, %d\n", + ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n", currCal->calState); return true; } @@ -208,8 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) if (!(ah->supp_cals & currCal->calData->calType)) return true; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Resetting Cal %d state for channel %u\n", + ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n", currCal->calData->calType, conf->channel->center_freq); ah->caldata->CalValid &= ~currCal->calData->calType; @@ -302,7 +300,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * noisefloor until the next calibration timer. */ if (j == 10000) { - ath_dbg(common, ATH_DBG_ANY, + ath_dbg(common, ANY, "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", REG_READ(ah, AR_PHY_AGC_CONTROL)); return; @@ -344,17 +342,17 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) if (!nf[i]) continue; - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "NF calibrated [%s] [chain %d] is %d\n", (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]); if (nf[i] > ATH9K_NF_TOO_HIGH) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "NF[%d] (%d) > MAX (%d), correcting to MAX\n", i, nf[i], ATH9K_NF_TOO_HIGH); nf[i] = limit->max; } else if (nf[i] < limit->min) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "NF[%d] (%d) < MIN (%d), correcting to NOM\n", i, nf[i], limit->min); nf[i] = limit->nominal; @@ -373,7 +371,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) chan->channelFlags &= (~CHANNEL_CW_INT); if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "NF did not complete in calibration window\n"); return false; } @@ -383,7 +381,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) nf = nfarray[0]; if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) && nf > nfThresh) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "noise floor failed detected; detected %d, threshold %d\n", nf, nfThresh); chan->channelFlags |= CHANNEL_CW_INT; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 2741203e803..68d972bf232 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -709,24 +709,29 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf, len += snprintf(buf + len, size - len, "Stations:\n" - " tid: addr sched paused buf_q-empty an ac\n" + " tid: addr sched paused buf_q-empty an ac baw\n" " ac: addr sched tid_q-empty txq\n"); spin_lock(&sc->nodes_lock); list_for_each_entry(an, &sc->nodes, list) { + unsigned short ma = an->maxampdu; + if (ma == 0) + ma = 65535; /* see ath_lookup_rate */ len += snprintf(buf + len, size - len, - "%pM\n", an->sta->addr); + "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n", + an->vif->addr, an->sta->addr, ma, + (unsigned int)(an->mpdudensity)); if (len >= size) goto done; for (q = 0; q < WME_NUM_TID; q++) { struct ath_atx_tid *tid = &(an->tid[q]); len += snprintf(buf + len, size - len, - " tid: %p %s %s %i %p %p\n", + " tid: %p %s %s %i %p %p %hu\n", tid, tid->sched ? "sched" : "idle", tid->paused ? 
"paused" : "running", skb_queue_empty(&tid->buf_q), - tid->an, tid->ac); + tid->an, tid->ac, tid->baw_size); if (len >= size) goto done; } @@ -851,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; if (bf_isampdu(bf)) { - if (flags & ATH_TX_BAR) + if (flags & ATH_TX_ERROR) TX_STAT_INC(qnum, a_xretries); else TX_STAT_INC(qnum, a_completed); @@ -1625,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug); #endif + + ath9k_dfs_init_debug(sc); + debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_dma); debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc, diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 356352ac2d6..776a24ada60 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -19,6 +19,7 @@ #include "hw.h" #include "rc.h" +#include "dfs_debug.h" struct ath_txq; struct ath_buf; @@ -187,6 +188,7 @@ struct ath_stats { struct ath_interrupt_stats istats; struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; struct ath_rx_stats rxstats; + struct ath_dfs_stats dfs_stats; u32 reset[__RESET_TYPE_MAX]; }; diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c new file mode 100644 index 00000000000..f4f56aff1e9 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2008-2011 Atheros Communications Inc. + * Copyright (c) 2011 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hw.h" +#include "hw-ops.h" +#include "ath9k.h" +#include "dfs.h" +#include "dfs_debug.h" + +/* + * TODO: move into or synchronize this with generic header + * as soon as IF is defined + */ +struct dfs_radar_pulse { + u16 freq; + u64 ts; + u32 width; + u8 rssi; +}; + +/* internal struct to pass radar data */ +struct ath_radar_data { + u8 pulse_bw_info; + u8 rssi; + u8 ext_rssi; + u8 pulse_length_ext; + u8 pulse_length_pri; +}; + +/* convert pulse duration to usecs, considering clock mode */ +static u32 dur_to_usecs(struct ath_hw *ah, u32 dur) +{ + const u32 AR93X_NSECS_PER_DUR = 800; + const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11); + u32 nsecs; + + if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan)) + nsecs = dur * AR93X_NSECS_PER_DUR_FAST; + else + nsecs = dur * AR93X_NSECS_PER_DUR; + + return (nsecs + 500) / 1000; +} + +#define PRI_CH_RADAR_FOUND 0x01 +#define EXT_CH_RADAR_FOUND 0x02 +static bool +ath9k_postprocess_radar_event(struct ath_softc *sc, + struct ath_radar_data *are, + struct dfs_radar_pulse *drp) +{ + u8 rssi; + u16 dur; + + ath_dbg(ath9k_hw_common(sc->sc_ah), DFS, + "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n", + are->pulse_bw_info, + are->pulse_length_pri, are->rssi, + are->pulse_length_ext, are->ext_rssi); + + /* + * Only the last 2 bits of the BW info are relevant, they indicate + * which channel the radar was detected in. + */ + are->pulse_bw_info &= 0x03; + + switch (are->pulse_bw_info) { + case PRI_CH_RADAR_FOUND: + /* radar in ctrl channel */ + dur = are->pulse_length_pri; + DFS_STAT_INC(sc, pri_phy_errors); + /* + * cannot use ctrl channel RSSI + * if extension channel is stronger + */ + rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi; + break; + case EXT_CH_RADAR_FOUND: + /* radar in extension channel */ + dur = are->pulse_length_ext; + DFS_STAT_INC(sc, ext_phy_errors); + /* + * cannot use extension channel RSSI + * if control channel is stronger + */ + rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi; + break; + case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND): + /* + * Conducted testing, when pulse is on DC, both pri and ext + * durations are reported to be same + * + * Radiated testing, when pulse is on DC, different pri and + * ext durations are reported, so take the larger of the two + */ + if (are->pulse_length_ext >= are->pulse_length_pri) + dur = are->pulse_length_ext; + else + dur = are->pulse_length_pri; + DFS_STAT_INC(sc, dc_phy_errors); + + /* when both are present use stronger one */ + rssi = (are->rssi < are->ext_rssi) ? 
are->ext_rssi : are->rssi; + break; + default: + /* + * Bogus bandwidth info was received in descriptor, + * so ignore this PHY error + */ + DFS_STAT_INC(sc, bwinfo_discards); + return false; + } + + if (rssi == 0) { + DFS_STAT_INC(sc, rssi_discards); + return false; + } + + /* + * TODO: check chirping pulses + * checks for chirping are dependent on the DFS regulatory domain + * used, which is yet TBD + */ + + /* convert duration to usecs */ + drp->width = dur_to_usecs(sc->sc_ah, dur); + drp->rssi = rssi; + + DFS_STAT_INC(sc, pulses_detected); + return true; +} +#undef PRI_CH_RADAR_FOUND +#undef EXT_CH_RADAR_FOUND + +/* + * DFS: check PHY-error for radar pulse and feed the detector + */ +void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, + struct ath_rx_status *rs, u64 mactime) +{ + struct ath_radar_data ard; + u16 datalen; + char *vdata_end; + struct dfs_radar_pulse drp; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) && + (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) { + ath_dbg(common, DFS, + "Error: rs_phyer=0x%x not a radar error\n", + rs->rs_phyerr); + return; + } + + datalen = rs->rs_datalen; + if (datalen == 0) { + DFS_STAT_INC(sc, datalen_discards); + return; + } + + ard.rssi = rs->rs_rssi_ctl0; + ard.ext_rssi = rs->rs_rssi_ext0; + + /* + * hardware stores this as 8 bit signed value. + * we will cap it at 0 if it is a negative number + */ + if (ard.rssi & 0x80) + ard.rssi = 0; + if (ard.ext_rssi & 0x80) + ard.ext_rssi = 0; + + vdata_end = (char *)data + datalen; + ard.pulse_bw_info = vdata_end[-1]; + ard.pulse_length_ext = vdata_end[-2]; + ard.pulse_length_pri = vdata_end[-3]; + + ath_dbg(common, DFS, + "bw_info=%d, length_pri=%d, length_ext=%d, " + "rssi_pri=%d, rssi_ext=%d\n", + ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext, + ard.rssi, ard.ext_rssi); + + drp.freq = ah->curchan->channel; + drp.ts = mactime; + if (ath9k_postprocess_radar_event(sc, &ard, &drp)) { + static u64 last_ts; + ath_dbg(common, DFS, + "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " + "width=%d, rssi=%d, delta_ts=%llu\n", + drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts); + last_ts = drp.ts; + /* + * TODO: forward pulse to pattern detector + * + * ieee80211_add_radar_pulse(drp.freq, drp.ts, + * drp.width, drp.rssi); + */ + } +} diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h new file mode 100644 index 00000000000..c2412857f12 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2008-2011 Atheros Communications Inc. + * Copyright (c) 2011 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef ATH9K_DFS_H +#define ATH9K_DFS_H + +#if defined(CONFIG_ATH9K_DFS_CERTIFIED) +/** + * ath9k_dfs_process_phyerr - process radar PHY error + * @sc: ath_softc + * @data: RX payload data + * @rs: RX status after processing descriptor + * @mactime: receive time + * + * This function is called whenever the HW DFS module detects a radar + * pulse and reports it as a PHY error. + * + * The radar information provided as raw payload data is validated and + * filtered for false pulses. Events passing all tests are forwarded to + * the upper layer for pattern detection. + */ +void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, + struct ath_rx_status *rs, u64 mactime); +#else +static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, + struct ath_rx_status *rs, u64 mactime) { } +#endif + +#endif /* ATH9K_DFS_H */ diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c new file mode 100644 index 00000000000..106d031d834 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2008-2011 Atheros Communications Inc. + * Copyright (c) 2011 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/debugfs.h> +#include <linux/export.h> + +#include "ath9k.h" +#include "dfs_debug.h" + +#define ATH9K_DFS_STAT(s, p) \ + len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ + sc->debug.stats.dfs_stats.p); + +static ssize_t read_file_dfs(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version; + char *buf; + unsigned int len = 0, size = 8000; + ssize_t retval = 0; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + len += snprintf(buf + len, size - len, "DFS support for " + "macVersion = 0x%x, macRev = 0x%x: %s\n", + hw_ver->macVersion, hw_ver->macRev, + (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? 
+ "enabled" : "disabled"); + ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected); + ATH9K_DFS_STAT("Datalen discards ", datalen_discards); + ATH9K_DFS_STAT("RSSI discards ", rssi_discards); + ATH9K_DFS_STAT("BW info discards ", bwinfo_discards); + ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors); + ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors); + ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors); + + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static const struct file_operations fops_dfs_stats = { + .read = read_file_dfs, + .open = ath9k_dfs_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath9k_dfs_init_debug(struct ath_softc *sc) +{ + debugfs_create_file("dfs_stats", S_IRUSR, + sc->debug.debugfs_phy, sc, &fops_dfs_stats); +} diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h new file mode 100644 index 00000000000..4911724cb44 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2008-2011 Atheros Communications Inc. + * Copyright (c) 2011 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef ATH9K_DFS_DEBUG_H +#define ATH9K_DFS_DEBUG_H + +#include "hw.h" + +/** + * struct ath_dfs_stats - DFS Statistics + * + * @pulses_detected: No. of pulses detected so far + * @datalen_discards: No. of pulses discarded due to invalid datalen + * @rssi_discards: No. of pulses discarded due to invalid RSSI + * @bwinfo_discards: No. of pulses discarded due to invalid BW info + * @pri_phy_errors: No. of pulses reported for primary channel + * @ext_phy_errors: No. of pulses reported for extension channel + * @dc_phy_errors: No. 
of pulses reported for primary + extension channel + */ +struct ath_dfs_stats { + u32 pulses_detected; + u32 datalen_discards; + u32 rssi_discards; + u32 bwinfo_discards; + u32 pri_phy_errors; + u32 ext_phy_errors; + u32 dc_phy_errors; +}; + +#if defined(CONFIG_ATH9K_DFS_DEBUGFS) + +#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) +void ath9k_dfs_init_debug(struct ath_softc *sc); + +#else + +#define DFS_STAT_INC(sc, c) do { } while (0) +static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { } + +#endif /* CONFIG_ATH9K_DFS_DEBUGFS */ + +#endif /* ATH9K_DFS_DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index e46f751ab50..c4352323331 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -305,8 +305,7 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; break; default: - ath_dbg(common, ATH_DBG_EEPROM, - "Invalid chainmask configuration\n"); + ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); break; } } diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 49abd34be74..5ff7ab96512 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -249,7 +249,8 @@ enum eeprom_param { EEP_ANT_DIV_CTL1, EEP_CHAIN_MASK_REDUCE, EEP_ANTENNA_GAIN_2G, - EEP_ANTENNA_GAIN_5G + EEP_ANTENNA_GAIN_5G, + EEP_QUICK_DROP }; enum ar5416_rates { diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 9a7520f987f..4322ac80c20 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -38,7 +38,7 @@ static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Unable to read eeprom region\n"); return false; } @@ -62,8 +62,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); + ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) @@ -204,8 +203,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) return false; } - ath_dbg(common, ATH_DBG_EEPROM, - "Read Magic = 0x%04X\n", magic); + ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -227,7 +225,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) } } - ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", + ath_dbg(common, EEPROM, "need_swap = %s\n", need_swap ? "True" : "False"); if (need_swap) @@ -249,7 +247,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) u32 integer; u16 word; - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); @@ -435,11 +433,11 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "PDADC: Chain %d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " @@ -473,7 +471,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, int i; u16 twiceMinEdgePower; - u16 twiceMaxEdgePower = MAX_RATE_POWER; + u16 twiceMaxEdgePower; u16 scaledPower = 0, minCtlPower; u16 numCtlModes; const u16 *pCtlMode; @@ -542,9 +540,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - if (ah->eep_ops->get_eeprom_ver(ah) == 14 && - ah->eep_ops->get_eeprom_rev(ah) <= 2) - twiceMaxEdgePower = MAX_RATE_POWER; + twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { @@ -1081,8 +1077,7 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) u16 spur_val = AR_NO_SPUR; - ath_dbg(common, ATH_DBG_ANI, - "Getting spur idx:%d is2Ghz:%d val:%x\n", + ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { @@ -1090,8 +1085,8 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; - ath_dbg(common, ATH_DBG_ANI, - "Getting spur val from new loc. %d\n", spur_val); + ath_dbg(common, ANI, "Getting spur val from new loc. %d\n", + spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_MAP4K_SPURCHAN; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 4f5c50a87ce..f272236d805 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -41,7 +41,7 @@ static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "Unable to read eeprom region\n"); return false; } @@ -66,8 +66,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); + ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) @@ -197,8 +196,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) return false; } - ath_dbg(common, ATH_DBG_EEPROM, - "Read Magic = 0x%04X\n", magic); + ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -220,7 +218,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) } } - ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", + ath_dbg(common, EEPROM, "need_swap = %s\n", need_swap ? 
"True" : "False"); if (need_swap) @@ -569,7 +567,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 - u16 twiceMaxEdgePower = MAX_RATE_POWER; + u16 twiceMaxEdgePower; int i; struct cal_ctl_data_ar9287 *rep; struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} }, @@ -669,6 +667,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; + twiceMaxEdgePower = MAX_RATE_POWER; /* Walk through the CTL indices stored in EEPROM */ for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { struct cal_ctl_edges *pRdEdgesPower; @@ -1040,8 +1039,7 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, struct ath_common *common = ath9k_hw_common(ah); u16 spur_val = AR_NO_SPUR; - ath_dbg(common, ATH_DBG_ANI, - "Getting spur idx:%d is2Ghz:%d val:%x\n", + ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { @@ -1049,8 +1047,8 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; - ath_dbg(common, ATH_DBG_ANI, - "Getting spur val from new loc. %d\n", spur_val); + ath_dbg(common, ANI, "Getting spur val from new loc. %d\n", + spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_MAP9287_SPURCHAN; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 81e62967167..619b95d764f 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -121,8 +121,7 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); + ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) @@ -279,8 +278,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) } if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Read Magic = 0x%04X\n", magic); + ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -303,7 +301,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) } } - ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", + ath_dbg(common, EEPROM, "need_swap = %s\n", need_swap ? "True" : "False"); if (need_swap) @@ -325,7 +323,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) u32 integer, j; u16 word; - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "EEPROM Endianness is not native.. 
Changing.\n"); word = swab16(eep->baseEepHeader.length); @@ -385,7 +383,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) if ((ah->hw_version.devid == AR9280_DEVID_PCI) && ((eep->baseEepHeader.version & 0xff) > 0x0a) && (eep->baseEepHeader.pwdclkind == 0)) - ah->need_an_top2_fixup = 1; + ah->need_an_top2_fixup = true; if ((common->bus_ops->ath_bus_type == ATH_USB) && (AR_SREV_9280(ah))) @@ -965,15 +963,12 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); - ath_dbg(common, ATH_DBG_EEPROM, + ath_dbg(common, EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); - ath_dbg(common, ATH_DBG_EEPROM, - "PDADC: Chain %d | PDADC %3d " - "Value %3d | PDADC %3d Value %3d | " - "PDADC %3d Value %3d | PDADC %3d " - "Value %3d |\n", + ath_dbg(common, EEPROM, + "PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n", i, 4 * j, pdadcValues[4 * j], 4 * j + 1, pdadcValues[4 * j + 1], 4 * j + 2, pdadcValues[4 * j + 2], @@ -1000,7 +995,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; - u16 twiceMaxEdgePower = MAX_RATE_POWER; + u16 twiceMaxEdgePower; int i; struct cal_ctl_data *rep; struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { @@ -1121,9 +1116,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - if (ah->eep_ops->get_eeprom_ver(ah) == 14 && - ah->eep_ops->get_eeprom_rev(ah) <= 2) - twiceMaxEdgePower = MAX_RATE_POWER; + twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { if ((((cfgCtl & ~CTL_MODE_M) | @@ -1280,7 +1273,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; break; default: - ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM, + ath_dbg(ath9k_hw_common(ah), EEPROM, "Invalid chainmask configuration\n"); break; } @@ -1398,8 +1391,7 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) u16 spur_val = AR_NO_SPUR; - ath_dbg(common, ATH_DBG_ANI, - "Getting spur idx:%d is2Ghz:%d val:%x\n", + ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { @@ -1407,8 +1399,8 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; - ath_dbg(common, ATH_DBG_ANI, - "Getting spur val from new loc. %d\n", spur_val); + ath_dbg(common, ANI, "Getting spur val from new loc. 
%d\n", + spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_DEF_SPURCHAN; diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index 655576c8fda..597c84e31ad 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -130,12 +130,12 @@ static void ath_detect_bt_priority(struct ath_softc *sc) sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); /* Detect if colocated bt started scanning */ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { - ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, + ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, "BT scan detected\n"); sc->sc_flags |= (SC_OP_BT_SCAN | SC_OP_BT_PRIORITY_DETECTED); } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { - ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, + ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, "BT priority traffic detected\n"); sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED; } @@ -189,8 +189,8 @@ static void ath_btcoex_period_timer(unsigned long data) bool is_btscan; ath9k_ps_wakeup(sc); - ath_detect_bt_priority(sc); - + if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) + ath_detect_bt_priority(sc); is_btscan = sc->sc_flags & SC_OP_BT_SCAN; spin_lock_bh(&btcoex->btcoex_lock); @@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data) ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type); + ath9k_hw_btcoex_enable(ah); spin_unlock_bh(&btcoex->btcoex_lock); if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { @@ -212,8 +213,9 @@ static void ath_btcoex_period_timer(unsigned long data) } ath9k_ps_restore(sc); + timer_period = btcoex->btcoex_period / 1000; mod_timer(&btcoex->period_timer, jiffies + - msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD)); + msecs_to_jiffies(timer_period)); } /* @@ -228,8 +230,7 @@ static void ath_btcoex_no_stomp_timer(void *arg) struct ath_common *common = ath9k_hw_common(ah); bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; - ath_dbg(common, ATH_DBG_BTCOEX, - "no stomp timer running\n"); + ath_dbg(common, BTCOEX, "no stomp timer running\n"); ath9k_ps_wakeup(sc); spin_lock_bh(&btcoex->btcoex_lock); @@ -239,6 +240,7 @@ static void ath_btcoex_no_stomp_timer(void *arg) else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); + ath9k_hw_btcoex_enable(ah); spin_unlock_bh(&btcoex->btcoex_lock); ath9k_ps_restore(sc); } @@ -247,6 +249,9 @@ int ath_init_btcoex_timer(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; + if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE) + return 0; + btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * btcoex->btcoex_period / 100; @@ -277,8 +282,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc) struct ath_btcoex *btcoex = &sc->btcoex; struct ath_hw *ah = sc->sc_ah; - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, - "Starting btcoex timers\n"); + ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); + + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; /* make sure duty cycle timer is also stopped when resuming */ if (btcoex->hw_timer_enabled) @@ -300,6 +307,9 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc) struct ath_btcoex *btcoex = &sc->btcoex; struct ath_hw *ah = sc->sc_ah; + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + del_timer_sync(&btcoex->period_timer); if (btcoex->hw_timer_enabled) diff --git 
a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 57fe22b2424..2eadffb7971 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -167,9 +167,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv, /* TSF out of range threshold fixed at 1 second */ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; - ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n", + ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n", intval, tsf, tsftu); - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", bs.bs_bmissthreshold, bs.bs_sleepduration, bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); @@ -224,9 +224,8 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv, if (priv->op_flags & OP_ENABLE_BEACON) imask |= ATH9K_INT_SWBA; - ath_dbg(common, ATH_DBG_CONFIG, - "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d " - "imask: 0x%x\n", + ath_dbg(common, CONFIG, + "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n", bss_conf->beacon_interval, nexttbtt, priv->ah->config.sw_beacon_response_time, imask); @@ -273,9 +272,8 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv, if (priv->op_flags & OP_ENABLE_BEACON) imask |= ATH9K_INT_SWBA; - ath_dbg(common, ATH_DBG_CONFIG, - "IBSS Beacon config, intval: %d, nexttbtt: %u, " - "resp_time: %d, imask: 0x%x\n", + ath_dbg(common, CONFIG, + "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n", bss_conf->beacon_interval, nexttbtt, priv->ah->config.sw_beacon_response_time, imask); @@ -323,7 +321,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv, tx_slot = ath9k_htc_tx_get_slot(priv); if (tx_slot < 0) { - ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n"); + ath_dbg(common, XMIT, "No free CAB slot\n"); dev_kfree_skb_any(skb); goto next; } @@ -333,8 +331,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv, ath9k_htc_tx_clear_slot(priv, tx_slot); dev_kfree_skb_any(skb); - ath_dbg(common, ATH_DBG_XMIT, - "Failed to send CAB frame\n"); + ath_dbg(common, XMIT, "Failed to send CAB frame\n"); } else { spin_lock_bh(&priv->tx.tx_lock); priv->tx.queued_cnt++; @@ -409,7 +406,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv, ret = htc_send(priv->htc, beacon); if (ret != 0) { if (ret == -ENOMEM) { - ath_dbg(common, ATH_DBG_BSTUCK, + ath_dbg(common, BSTUCK, "Failed to send beacon, no free TX buffer\n"); } dev_kfree_skb_any(beacon); @@ -434,7 +431,7 @@ static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv, slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval; slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1; - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, BEACON, "Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n", slot, tsf, tsftu, intval); @@ -450,8 +447,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, if (swba->beacon_pending != 0) { priv->cur_beacon_conf.bmiss_cnt++; if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) { - ath_dbg(common, ATH_DBG_BSTUCK, - "Beacon stuck, HW reset\n"); + ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n"); ieee80211_queue_work(priv->hw, &priv->fatal_work); } @@ -459,7 +455,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, } if (priv->cur_beacon_conf.bmiss_cnt) { - ath_dbg(common, ATH_DBG_BSTUCK, + ath_dbg(common, BSTUCK, "Resuming beacon xmit after %u misses\n", 
priv->cur_beacon_conf.bmiss_cnt); priv->cur_beacon_conf.bmiss_cnt = 0; @@ -495,8 +491,8 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv, priv->cur_beacon_conf.bslot[avp->bslot] = vif; spin_unlock_bh(&priv->beacon_lock); - ath_dbg(common, ATH_DBG_CONFIG, - "Added interface at beacon slot: %d\n", avp->bslot); + ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n", + avp->bslot); } void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv, @@ -509,8 +505,8 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv, priv->cur_beacon_conf.bslot[avp->bslot] = NULL; spin_unlock_bh(&priv->beacon_lock); - ath_dbg(common, ATH_DBG_CONFIG, - "Removed interface at beacon slot: %d\n", avp->bslot); + ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n", + avp->bslot); } /* @@ -536,8 +532,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv, tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF; avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); - ath_dbg(common, ATH_DBG_CONFIG, - "tsfadjust is: %llu for bslot: %d\n", + ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n", (unsigned long long)tsfadjust, avp->bslot); } @@ -568,7 +563,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, (priv->num_ap_vif > 1) && (vif->type == NL80211_IFTYPE_AP) && (cur_conf->beacon_interval != bss_conf->beacon_int)) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Changing beacon interval of multiple AP interfaces !\n"); return false; } @@ -579,7 +574,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, */ if (priv->num_ap_vif && (vif->type != NL80211_IFTYPE_AP)) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "HW in AP mode, cannot set STA beacon parameters\n"); return false; } @@ -597,7 +592,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, &beacon_configured); if (beacon_configured) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Beacon already configured for a station interface\n"); return false; } @@ -637,8 +632,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, ath9k_htc_beacon_config_ap(priv, cur_conf); break; default: - ath_dbg(common, ATH_DBG_CONFIG, - "Unsupported beaconing mode\n"); + ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); return; } } @@ -659,8 +653,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv) ath9k_htc_beacon_config_ap(priv, cur_conf); break; default: - ath_dbg(common, ATH_DBG_CONFIG, - "Unsupported beaconing mode\n"); + ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); return; } } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c index e3a02eb8e0c..6506e1fd503 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c @@ -36,12 +36,12 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv) priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN); /* Detect if colocated bt started scanning */ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + ath_dbg(ath9k_hw_common(ah), BTCOEX, "BT scan detected\n"); priv->op_flags |= (OP_BT_SCAN | OP_BT_PRIORITY_DETECTED); } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + ath_dbg(ath9k_hw_common(ah), BTCOEX, "BT priority traffic detected\n"); priv->op_flags |= OP_BT_PRIORITY_DETECTED; } @@ -80,6 +80,7 
@@ static void ath_btcoex_period_work(struct work_struct *work) ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type); + ath9k_hw_btcoex_enable(priv->ah); timer_period = is_btscan ? btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp; ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work, @@ -101,19 +102,22 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work) struct ath_common *common = ath9k_hw_common(ah); bool is_btscan = priv->op_flags & OP_BT_SCAN; - ath_dbg(common, ATH_DBG_BTCOEX, - "time slice work for bt and wlan\n"); + ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n"); if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); + ath9k_hw_btcoex_enable(priv->ah); } void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv) { struct ath_btcoex *btcoex = &priv->btcoex; + if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE) + return; + btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD; btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * btcoex->btcoex_period / 100; @@ -132,7 +136,10 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv) struct ath_btcoex *btcoex = &priv->btcoex; struct ath_hw *ah = priv->ah; - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n"); + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + + ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n"); btcoex->bt_priority_cnt = 0; btcoex->bt_priority_time = jiffies; @@ -146,6 +153,9 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv) */ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv) { + if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE) + return; + cancel_delayed_work_sync(&priv->coex_period_work); cancel_delayed_work_sync(&priv->duty_cycle_work); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 966661c9e58..9be10a2da1c 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -299,8 +299,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) (u8 *) &val, sizeof(val), 100); if (unlikely(r)) { - ath_dbg(common, ATH_DBG_WMI, - "REGISTER READ FAILED: (0x%04x, %d)\n", + ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", reg_offset, r); return -EIO; } @@ -327,7 +326,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr, (u8 *)tmpval, sizeof(u32) * count, 100); if (unlikely(ret)) { - ath_dbg(common, ATH_DBG_WMI, + ath_dbg(common, WMI, "Multiple REGISTER READ FAILED (count: %d)\n", count); } @@ -352,8 +351,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) (u8 *) &val, sizeof(val), 100); if (unlikely(r)) { - ath_dbg(common, ATH_DBG_WMI, - "REGISTER WRITE FAILED:(0x%04x, %d)\n", + ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n", reg_offset, r); } } @@ -384,7 +382,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset) (u8 *) &rsp_status, sizeof(rsp_status), 100); if (unlikely(r)) { - ath_dbg(common, ATH_DBG_WMI, + ath_dbg(common, WMI, "REGISTER WRITE FAILED, multi len: %d\n", priv->wmi->multi_write_idx); } @@ -434,7 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv) (u8 *) &rsp_status, sizeof(rsp_status), 100); if (unlikely(r)) { - ath_dbg(common, 
ATH_DBG_WMI, + ath_dbg(common, WMI, "REGISTER WRITE FAILED, multi len: %d\n", priv->wmi->multi_write_idx); } @@ -512,8 +510,7 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv, tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2); rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2); - ath_dbg(common, ATH_DBG_CONFIG, - "TX streams %d, RX streams: %d\n", + ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", tx_streams, rx_streams); if (tx_streams != rx_streams) { @@ -610,7 +607,7 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv) { int qnum; - switch (priv->ah->btcoex_hw.scheme) { + switch (ath9k_hw_get_btcoex_scheme(priv->ah)) { case ATH_BTCOEX_CFG_NONE: break; case ATH_BTCOEX_CFG_3WIRE: @@ -704,7 +701,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) { ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE; - ath9k_init_btcoex(priv); + if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) + ath9k_init_btcoex(priv); } return 0; @@ -876,9 +874,8 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, goto err_world; } - ath_dbg(common, ATH_DBG_CONFIG, - "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, " - "BE:%d, BK:%d, VI:%d, VO:%d\n", + ath_dbg(common, CONFIG, + "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n", priv->wmi_cmd_ep, priv->beacon_ep, priv->cab_ep, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 0b9a0e8a495..ef4c6066129 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -266,7 +266,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, ath9k_wmi_event_drain(priv); - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n", priv->ah->curchan->channel, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), @@ -415,7 +415,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx; priv->ah->is_monitoring = true; - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Attached a monitor interface at idx: %d, sta idx: %d\n", priv->mon_vif_idx, sta_idx); @@ -427,7 +427,7 @@ err_sta: */ __ath9k_htc_remove_monitor_interface(priv); err_vif: - ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n"); + ath_dbg(common, FATAL, "Unable to attach a monitor interface\n"); return ret; } @@ -452,7 +452,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) priv->nstations--; priv->ah->is_monitoring = false; - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Removed a monitor interface at idx: %d, sta idx: %d\n", priv->mon_vif_idx, sta_idx); @@ -512,11 +512,11 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, } if (sta) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Added a station entry for: %pM (idx: %d)\n", sta->addr, tsta.sta_index); } else { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Added a station entry for VIF %d (idx: %d)\n", avp->index, tsta.sta_index); } @@ -556,11 +556,11 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, } if (sta) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Removed a station entry for: %pM (idx: %d)\n", sta->addr, sta_idx); } else { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Removed a station entry for VIF %d 
(idx: %d)\n", avp->index, sta_idx); } @@ -665,7 +665,7 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv, ath9k_htc_setup_rate(priv, sta, &trate); ret = ath9k_htc_send_rate_cmd(priv, &trate); if (!ret) - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Updated target sta: %pM, rate caps: 0x%X\n", sta->addr, be32_to_cpu(trate.capflags)); } @@ -692,7 +692,7 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv, ret = ath9k_htc_send_rate_cmd(priv, &trate); if (!ret) - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Updated target sta: %pM, rate caps: 0x%X\n", bss_conf->bssid, be32_to_cpu(trate.capflags)); } @@ -721,11 +721,11 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv, WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); if (ret) - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Unable to %s TX aggregation for (%pM, %d)\n", (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid); else - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "%s TX aggregation for (%pM, %d)\n", (aggr.aggr_enable) ? "Starting" : "Stopping", sta->addr, tid); @@ -784,7 +784,7 @@ void ath9k_htc_ani_work(struct work_struct *work) /* Long calibration runs independently of short calibration. */ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { longcal = true; - ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); + ath_dbg(common, ANI, "longcal @%lu\n", jiffies); common->ani.longcal_timer = timestamp; } @@ -793,8 +793,7 @@ void ath9k_htc_ani_work(struct work_struct *work) if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; - ath_dbg(common, ATH_DBG_ANI, - "shortcal @%lu\n", jiffies); + ath_dbg(common, ANI, "shortcal @%lu\n", jiffies); common->ani.shortcal_timer = timestamp; common->ani.resetcal_timer = timestamp; } @@ -808,7 +807,8 @@ void ath9k_htc_ani_work(struct work_struct *work) } /* Verify whether we must check ANI */ - if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { + if (ah->config.enable_ani && + (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -838,7 +838,7 @@ set_timer: * short calibration and long calibration. 
*/ cal_interval = ATH_LONG_CALINTERVAL; - if (priv->ah->config.enable_ani) + if (ah->config.enable_ani) cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); @@ -865,7 +865,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) padsize = padpos & 3; if (padsize && skb->len > padpos) { if (skb_headroom(skb) < padsize) { - ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n"); + ath_dbg(common, XMIT, "No room for padding\n"); goto fail_tx; } skb_push(skb, padsize); @@ -874,13 +874,13 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) slot = ath9k_htc_tx_get_slot(priv); if (slot < 0) { - ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n"); + ath_dbg(common, XMIT, "No free TX slot\n"); goto fail_tx; } ret = ath9k_htc_tx_start(priv, skb, slot, false); if (ret != 0) { - ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n"); + ath_dbg(common, XMIT, "Tx failed\n"); goto clear_slot; } @@ -908,7 +908,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Starting driver with initial channel: %d MHz\n", curchan->center_freq); @@ -942,7 +942,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) ret = ath9k_htc_update_cap_target(priv, 0); if (ret) - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Failed to update capability in target\n"); priv->op_flags &= ~OP_INVALID; @@ -957,7 +957,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) mod_timer(&priv->tx.cleanup_timer, jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL)); - if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) { + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) { ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, AR_STOMP_LOW_WLAN_WGHT); ath9k_hw_btcoex_enable(ah); @@ -979,7 +979,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); if (priv->op_flags & OP_INVALID) { - ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); + ath_dbg(common, ANY, "Device not present\n"); mutex_unlock(&priv->mutex); return; } @@ -1009,7 +1009,8 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); - if (ah->btcoex_hw.enabled) { + if (ah->btcoex_hw.enabled && + ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { ath9k_hw_btcoex_disable(ah); if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath_htc_cancel_btcoex_work(priv); @@ -1026,7 +1027,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) priv->op_flags |= OP_INVALID; - ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); + ath_dbg(common, CONFIG, "Driver halt\n"); mutex_unlock(&priv->mutex); } @@ -1119,8 +1120,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, ath9k_htc_start_ani(priv); } - ath_dbg(common, ATH_DBG_CONFIG, - "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index); + ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n", + vif->type, avp->index); out: ath9k_htc_ps_restore(priv); @@ -1176,7 +1177,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath9k_htc_stop_ani(priv); } - ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index); + ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); @@ -1201,8 +1202,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&priv->htc_pm_lock); if (enable_radio) { - ath_dbg(common, 
ATH_DBG_CONFIG, - "not-idle: enabling radio\n"); + ath_dbg(common, CONFIG, "not-idle: enabling radio\n"); ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); ath9k_htc_radio_enable(hw); } @@ -1224,7 +1224,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) struct ieee80211_channel *curchan = hw->conf.channel; int pos = curchan->hw_value; - ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", + ath_dbg(common, CONFIG, "Set channel: %d MHz\n", curchan->center_freq); ath9k_cmn_update_ichannel(&priv->ah->channels[pos], @@ -1264,8 +1264,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) } mutex_unlock(&priv->htc_pm_lock); - ath_dbg(common, ATH_DBG_CONFIG, - "idle: disabling radio\n"); + ath_dbg(common, CONFIG, "idle: disabling radio\n"); ath9k_htc_radio_disable(hw); } @@ -1297,7 +1296,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, *total_flags &= SUPPORTED_FILTERS; if (priv->op_flags & OP_INVALID) { - ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY, + ath_dbg(ath9k_hw_common(priv->ah), ANY, "Unable to configure filter on invalid state\n"); mutex_unlock(&priv->mutex); return; @@ -1308,8 +1307,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, rfilt = ath9k_htc_calcrxfilter(priv); ath9k_hw_setrxfilter(priv->ah, rfilt); - ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG, - "Set HW RX filter: 0x%x\n", rfilt); + ath_dbg(ath9k_hw_common(priv->ah), CONFIG, "Set HW RX filter: 0x%x\n", + rfilt); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); @@ -1376,7 +1375,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, qnum = get_hw_qnum(queue, priv->hwq_map); - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", queue, qnum, params->aifs, params->cw_min, params->cw_max, params->txop); @@ -1411,7 +1410,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw, return -ENOSPC; mutex_lock(&priv->mutex); - ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n"); + ath_dbg(common, CONFIG, "Set HW Key\n"); ath9k_htc_ps_wakeup(priv); switch (cmd) { @@ -1447,8 +1446,7 @@ static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv) struct ath_common *common = ath9k_hw_common(priv->ah); ath9k_hw_write_associd(priv->ah); - ath_dbg(common, ATH_DBG_CONFIG, - "BSSID: %pM aid: 0x%x\n", + ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n", common->curbssid, common->curaid); } @@ -1486,7 +1484,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, ath9k_htc_ps_wakeup(priv); if (changed & BSS_CHANGED_ASSOC) { - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", + ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n", bss_conf->assoc); bss_conf->assoc ? @@ -1511,8 +1509,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, } if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) { - ath_dbg(common, ATH_DBG_CONFIG, - "Beacon enabled for BSS: %pM\n", bss_conf->bssid); + ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n", + bss_conf->bssid); ath9k_htc_set_tsfadjust(priv, vif); priv->op_flags |= OP_ENABLE_BEACON; ath9k_htc_beacon_config(priv, vif); @@ -1524,7 +1522,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, * AP/IBSS interfaces. 
*/ if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Beacon disabled for BSS: %pM\n", bss_conf->bssid); priv->op_flags &= ~OP_ENABLE_BEACON; @@ -1542,7 +1540,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, (vif->type == NL80211_IFTYPE_AP)) { priv->op_flags |= OP_TSF_RESET; } - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Beacon interval changed for BSS: %pM\n", bss_conf->bssid); ath9k_htc_beacon_config(priv, vif); @@ -1732,8 +1730,7 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, goto out; } - ath_dbg(common, ATH_DBG_CONFIG, - "Set bitrate masks: 0x%x, 0x%x\n", + ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n", mask->control[IEEE80211_BAND_2GHZ].legacy, mask->control[IEEE80211_BAND_5GHZ].legacy); out: diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 2d81c700e20..3e40a646151 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -355,7 +355,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, vif_idx = avp->index; } else { if (!priv->ah->is_monitoring) { - ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT, + ath_dbg(ath9k_hw_common(priv->ah), XMIT, "VIF is null, but no monitor interface !\n"); return -EINVAL; } @@ -620,8 +620,7 @@ static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv, } spin_unlock_irqrestore(&epid_queue->lock, flags); - ath_dbg(common, ATH_DBG_XMIT, - "No matching packet for cookie: %d, epid: %d\n", + ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n", txs->cookie, epid); return NULL; @@ -705,8 +704,7 @@ static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb if (time_after(jiffies, tx_ctl->timestamp + msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) { - ath_dbg(common, ATH_DBG_XMIT, - "Dropping a packet due to TX timeout\n"); + ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n"); return true; } @@ -753,7 +751,7 @@ void ath9k_htc_tx_cleanup_timer(unsigned long data) skb = ath9k_htc_tx_get_packet(priv, &event->txs); if (skb) { - ath_dbg(common, ATH_DBG_XMIT, + ath_dbg(common, XMIT, "Found packet for cookie: %d, epid: %d\n", event->txs.cookie, MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID)); @@ -1167,8 +1165,7 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb, spin_unlock(&priv->rx.rxbuflock); if (rxbuf == NULL) { - ath_dbg(common, ATH_DBG_ANY, - "No free RX buffer\n"); + ath_dbg(common, ANY, "No free RX buffer\n"); goto err; } diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index e74c233757a..c4ad0b06bdb 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah, return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan, ini_reloaded); } + +static inline void ath9k_hw_set_radar_params(struct ath_hw *ah) +{ + if (!ath9k_hw_private_ops(ah)->set_radar_params) + return; + + ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf); +} + #endif /* ATH9K_HW_OPS_H */ diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8873c6e6fb9..ee775957505 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -133,7 +133,7 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) 
udelay(AH_TIME_QUANTUM); } - ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY, + ath_dbg(ath9k_hw_common(ah), ANY, "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", timeout, reg, REG_READ(ah, reg), mask, val); @@ -491,8 +491,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) if (ecode != 0) return ecode; - ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG, - "Eeprom VER: %d, REV: %d\n", + ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n", ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); @@ -504,7 +503,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) return ecode; } - if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) { + if (ah->config.enable_ani) { ath9k_hw_ani_setup(ah); ath9k_hw_ani_init(ah); } @@ -567,7 +566,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) } } - ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n", + ath_dbg(common, RESET, "serialize_regmode is %d\n", ah->config.serialize_regmode); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) @@ -610,6 +609,10 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (!AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_MRC_CCK; + /* disable ANI for 9340 */ + if (AR_SREV_9340(ah)) + ah->config.enable_ani = false; + ath9k_hw_init_mode_regs(ah); if (!ah->is_pciexpress) @@ -954,8 +957,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) { if (tu > 0xFFFF) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, - "bad global tx timeout %u\n", tu); + ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n", + tu); ah->globaltxtimeout = (u32) -1; return false; } else { @@ -976,7 +979,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) int rx_lat = 0, tx_lat = 0, eifs = 0; u32 reg; - ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", + ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n", ah->misc_mode); if (!chan) @@ -1271,7 +1274,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) (npend || type == ATH9K_RESET_COLD)) { int reset_err = 0; - ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, + ath_dbg(ath9k_hw_common(ah), RESET, "reset MAC via external reset\n"); reset_err = ah->external_reset(); @@ -1294,8 +1297,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) REG_WRITE(ah, AR_RTC_RC, 0); if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, - "RTC stuck in MAC reset\n"); + ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n"); return false; } @@ -1340,8 +1342,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) AR_RTC_STATUS_M, AR_RTC_STATUS_ON, AH_WAIT_TIMEOUT)) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, - "RTC not waking up\n"); + ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n"); return false; } @@ -1350,6 +1351,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) { + bool ret = false; if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_WA, ah->WARegVal); @@ -1361,13 +1363,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) switch (type) { case ATH9K_RESET_POWER_ON: - return ath9k_hw_set_reset_power_on(ah); + ret = ath9k_hw_set_reset_power_on(ah); + break; case ATH9K_RESET_WARM: case ATH9K_RESET_COLD: - return ath9k_hw_set_reset(ah, type); + ret = ath9k_hw_set_reset(ah, type); + break; default: - return false; + break; } + + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + REG_WRITE(ah, 
AR_RTC_KEEP_AWAKE, 0x2); + + return ret; } static bool ath9k_hw_chip_reset(struct ath_hw *ah, @@ -1406,7 +1415,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { if (ath9k_hw_numtxpending(ah, qnum)) { - ath_dbg(common, ATH_DBG_QUEUE, + ath_dbg(common, QUEUE, "Transmit frames pending on queue %d\n", qnum); return false; } @@ -1506,6 +1515,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, struct ath9k_hw_cal_data *caldata, bool bChannelChange) { struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 saveLedState; struct ath9k_channel *curchan = ah->curchan; u32 saveDefAntenna; @@ -1513,6 +1523,52 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u64 tsf = 0; int i, r; bool allow_fbs = false; + bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI); + bool save_fullsleep = ah->chip_fullsleep; + + if (mci) { + + ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan)); + + if (mci_hw->bt_state == MCI_BT_CAL_START) { + u32 payload[4] = {0, 0, 0, 0}; + + ath_dbg(common, MCI, "MCI stop rx for BT CAL\n"); + + mci_hw->bt_state = MCI_BT_CAL; + + /* + * MCI FIX: disable mci interrupt here. This is to avoid + * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and + * lead to mci_intr reentry. + */ + + ar9003_mci_disable_interrupt(ah); + + ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n"); + MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT); + ar9003_mci_send_message(ah, MCI_GPM, 0, payload, + 16, true, false); + + ath_dbg(common, MCI, "\nMCI BT is calibrating\n"); + + /* Wait BT calibration to be completed for 25ms */ + + if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE, + 0, 25000)) + ath_dbg(common, MCI, + "MCI got BT_CAL_DONE\n"); + else + ath_dbg(common, MCI, + "MCI ### BT cal takes to long, force bt_state to be bt_awake\n"); + mci_hw->bt_state = MCI_BT_AWAKE; + /* MCI FIX: enable mci interrupt here */ + ar9003_mci_enable_interrupt(ah); + + return true; + } + } + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return -EIO; @@ -1550,12 +1606,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (ath9k_hw_channel_change(ah, chan)) { ath9k_hw_loadnf(ah, ah->curchan); ath9k_hw_start_nfcal(ah, true); + if (mci && mci_hw->ready) + ar9003_mci_2g5g_switch(ah, true); + if (AR_SREV_9271(ah)) ar9002_hw_load_ani_reg(ah, chan); return 0; } } + if (mci) { + ar9003_mci_disable_interrupt(ah); + + if (mci_hw->ready && !save_fullsleep) { + ar9003_mci_mute_bt(ah); + udelay(20); + REG_WRITE(ah, AR_BTCOEX_CTRL, 0); + } + + mci_hw->bt_state = MCI_BT_SLEEP; + mci_hw->ready = false; + } + + saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); if (saveDefAntenna == 0) saveDefAntenna = 1; @@ -1611,6 +1684,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (r) return r; + if (mci) + ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep); + /* * Some AR91xx SoC devices frequently fail to accept TSF writes * right after the chip reset. When that happens, write a new @@ -1728,6 +1804,54 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_loadnf(ah, chan); ath9k_hw_start_nfcal(ah, true); + if (mci && mci_hw->ready) { + + if (IS_CHAN_2GHZ(chan) && + (mci_hw->bt_state == MCI_BT_SLEEP)) { + + if (ar9003_mci_check_int(ah, + AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) || + ar9003_mci_check_int(ah, + AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) { + + /* + * BT is sleeping. Check if BT wakes up during + * WLAN calibration. 
If BT wakes up during + * WLAN calibration, need to go through all + * message exchanges again and recal. + */ + + ath_dbg(common, MCI, + "MCI BT wakes up during WLAN calibration\n"); + + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | + AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE); + ath_dbg(common, MCI, "MCI send REMOTE_RESET\n"); + ar9003_mci_remote_reset(ah, true); + ar9003_mci_send_sys_waking(ah, true); + udelay(1); + if (IS_CHAN_2GHZ(chan)) + ar9003_mci_send_lna_transfer(ah, true); + + mci_hw->bt_state = MCI_BT_AWAKE; + + ath_dbg(common, MCI, "MCI re-cal\n"); + + if (caldata) { + caldata->done_txiqcal_once = false; + caldata->done_txclcal_once = false; + caldata->rtt_hist.num_readings = 0; + } + + if (!ath9k_hw_init_cal(ah, chan)) + return -EIO; + + } + } + ar9003_mci_enable_interrupt(ah); + } + ENABLE_REGWRITE_BUFFER(ah); ath9k_hw_restore_chainmask(ah); @@ -1742,14 +1866,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u32 mask; mask = REG_READ(ah, AR_CFG); if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { - ath_dbg(common, ATH_DBG_RESET, - "CFG Byte Swap Set 0x%x\n", mask); + ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n", + mask); } else { mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; REG_WRITE(ah, AR_CFG, mask); - ath_dbg(common, ATH_DBG_RESET, - "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); + ath_dbg(common, RESET, "Setting CFG 0x%x\n", + REG_READ(ah, AR_CFG)); } } else { if (common->bus_ops->ath_bus_type == ATH_USB) { @@ -1767,9 +1891,25 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, #endif } - if (ah->btcoex_hw.enabled) + if (ah->btcoex_hw.enabled && + ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) ath9k_hw_btcoex_enable(ah); + if (mci && mci_hw->ready) { + /* + * check BT state again to make + * sure it's not changed. 
+ */ + + ar9003_mci_sync_bt_state(ah); + ar9003_mci_2g5g_switch(ah, true); + + if ((mci_hw->bt_state == MCI_BT_AWAKE) && + (mci_hw->query_bt == true)) { + mci_hw->need_flush_btinfo = true; + } + } + if (AR_SREV_9300_20_OR_LATER(ah)) { ar9003_hw_bb_watchdog_config(ah); @@ -1934,6 +2074,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) { struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; int status = true, setChip = true; static const char *modes[] = { "AWAKE", @@ -1945,18 +2086,41 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) if (ah->power_mode == mode) return status; - ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n", + ath_dbg(common, RESET, "%s -> %s\n", modes[ah->power_mode], modes[mode]); switch (mode) { case ATH9K_PM_AWAKE: status = ath9k_hw_set_power_awake(ah, setChip); + + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); + break; case ATH9K_PM_FULL_SLEEP: + + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) { + if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) && + (mci->bt_state != MCI_BT_SLEEP) && + !mci->halted_bt_gpm) { + ath_dbg(common, MCI, + "MCI halt BT GPM (full_sleep)\n"); + ar9003_mci_send_coex_halt_bt_gpm(ah, + true, true); + } + + mci->ready = false; + REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); + } + ath9k_set_power_sleep(ah, setChip); ah->chip_fullsleep = true; break; case ATH9K_PM_NETWORK_SLEEP: + + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); + ath9k_set_power_network_sleep(ah, setChip); break; default: @@ -2006,9 +2170,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; break; default: - ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON, - "%s: unsupported opmode: %d\n", - __func__, ah->opmode); + ath_dbg(ath9k_hw_common(ah), BEACON, + "%s: unsupported opmode: %d\n", __func__, ah->opmode); return; break; } @@ -2059,10 +2222,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, else nextTbtt = bs->bs_nexttbtt; - ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); - ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); - ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); - ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); + ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim); + ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt); + ath_dbg(common, BEACON, "beacon period %d\n", beaconintval); + ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod); ENABLE_REGWRITE_BUFFER(ah); @@ -2109,6 +2272,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask) return chip_chainmask; } +/** + * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset + * @ah: the atheros hardware data structure + * + * We enable DFS support upstream on chipsets which have passed a series + * of tests. The testing requirements are going to be documented. Desired + * test requirements are documented at: + * + * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs + * + * Once a new chipset gets properly tested an individual commit can be used + * to document the testing for DFS for that chipset. 
+ */ +static bool ath9k_hw_dfs_tested(struct ath_hw *ah) +{ + + switch (ah->hw_version.macVersion) { + /* AR9580 will likely be our first target to get testing on */ + case AR_SREV_VERSION_9580: + default: + return false; + } +} + int ath9k_hw_fill_cap_info(struct ath_hw *ah) { struct ath9k_hw_capabilities *pCap = &ah->caps; @@ -2130,8 +2317,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) regulatory->current_rd += 5; else if (regulatory->current_rd == 0x41) regulatory->current_rd = 0x43; - ath_dbg(common, ATH_DBG_REGULATORY, - "regdomain mapped to 0x%x\n", regulatory->current_rd); + ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n", + regulatory->current_rd); } eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); @@ -2149,6 +2336,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) chip_chainmask = 1; + else if (AR_SREV_9462(ah)) + chip_chainmask = 3; else if (!AR_SREV_9280_20_OR_LATER(ah)) chip_chainmask = 7; else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah)) @@ -2205,12 +2394,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) else pCap->num_gpio_pins = AR_NUM_GPIO; - if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) { - pCap->hw_caps |= ATH9K_HW_CAP_CST; + if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; - } else { + else pCap->rts_aggr_limit = (8 * 1024); - } #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); @@ -2234,7 +2421,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; if (common->btcoex_enabled) { - if (AR_SREV_9300_20_OR_LATER(ah)) { + if (AR_SREV_9462(ah)) + btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI; + else if (AR_SREV_9300_20_OR_LATER(ah)) { btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; @@ -2318,6 +2507,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->pcie_lcr_offset = 0x80; } + if (ath9k_hw_dfs_tested(ah)) + pCap->hw_caps |= ATH9K_HW_CAP_DFS; + tx_chainmask = pCap->tx_chainmask; rx_chainmask = pCap->rx_chainmask; while (tx_chainmask || rx_chainmask) { @@ -2332,11 +2524,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9300_20_OR_LATER(ah)) { ah->enabled_cals |= TX_IQ_CAL; - if (!AR_SREV_9330(ah)) + if (AR_SREV_9485_OR_LATER(ah)) ah->enabled_cals |= TX_IQ_ON_AGC_CAL; } if (AR_SREV_9462(ah)) - pCap->hw_caps |= ATH9K_HW_CAP_RTT; + pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI; return 0; } @@ -2584,7 +2776,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) struct ath9k_channel *chan = ah->curchan; struct ieee80211_channel *channel = chan->chan; - reg->power_limit = min_t(int, limit, MAX_RATE_POWER); + reg->power_limit = min_t(u32, limit, MAX_RATE_POWER); if (test) channel->max_power = MAX_RATE_POWER / 2; @@ -2651,7 +2843,7 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah) { if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, AH_TSF_WRITE_TIMEOUT)) - ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, + ath_dbg(ath9k_hw_common(ah), RESET, "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); @@ -2776,7 +2968,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah, timer_next = tsf + trig_timeout; - ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER, + ath_dbg(ath9k_hw_common(ah), HWTIMER, "current tsf %x period %x timer_next %x\n", tsf, timer_period, 
timer_next); @@ -2865,8 +3057,8 @@ void ath_gen_timer_isr(struct ath_hw *ah) index = rightmost_index(timer_table, &thresh_mask); timer = timer_table->timers[index]; BUG_ON(!timer); - ath_dbg(common, ATH_DBG_HWTIMER, - "TSF overflow for Gen timer %d\n", index); + ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n", + index); timer->overflow(timer->arg); } @@ -2874,7 +3066,7 @@ void ath_gen_timer_isr(struct ath_hw *ah) index = rightmost_index(timer_table, &trigger_mask); timer = timer_table->timers[index]; BUG_ON(!timer); - ath_dbg(common, ATH_DBG_HWTIMER, + ath_dbg(common, HWTIMER, "Gen timer[%d] trigger\n", index); timer->trigger(timer->arg); } diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index f389b3c93cf..6a29004a71b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -59,9 +59,6 @@ #define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa #define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab -#define AR9300_NUM_BT_WEIGHTS 4 -#define AR9300_NUM_WLAN_WEIGHTS 4 - #define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1) #define ATH_DEFAULT_NOISE_FLOOR -95 @@ -129,6 +126,16 @@ #define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4 #define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5 #define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6 +#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16 +#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17 +#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18 +#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19 +#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14 +#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13 +#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9 +#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8 +#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d +#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e #define AR_GPIOD_MASK 0x00001FFF #define AR_GPIO_BIT(_gpio) (1 << (_gpio)) @@ -189,20 +196,25 @@ enum ath_ini_subsys { enum ath9k_hw_caps { ATH9K_HW_CAP_HT = BIT(0), ATH9K_HW_CAP_RFSILENT = BIT(1), - ATH9K_HW_CAP_CST = BIT(2), - ATH9K_HW_CAP_AUTOSLEEP = BIT(4), - ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5), - ATH9K_HW_CAP_EDMA = BIT(6), - ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7), - ATH9K_HW_CAP_LDPC = BIT(8), - ATH9K_HW_CAP_FASTCLOCK = BIT(9), - ATH9K_HW_CAP_SGI_20 = BIT(10), - ATH9K_HW_CAP_PAPRD = BIT(11), - ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12), - ATH9K_HW_CAP_2GHZ = BIT(13), - ATH9K_HW_CAP_5GHZ = BIT(14), - ATH9K_HW_CAP_APM = BIT(15), - ATH9K_HW_CAP_RTT = BIT(16), + ATH9K_HW_CAP_AUTOSLEEP = BIT(2), + ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3), + ATH9K_HW_CAP_EDMA = BIT(4), + ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5), + ATH9K_HW_CAP_LDPC = BIT(6), + ATH9K_HW_CAP_FASTCLOCK = BIT(7), + ATH9K_HW_CAP_SGI_20 = BIT(8), + ATH9K_HW_CAP_PAPRD = BIT(9), + ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10), + ATH9K_HW_CAP_2GHZ = BIT(11), + ATH9K_HW_CAP_5GHZ = BIT(12), + ATH9K_HW_CAP_APM = BIT(13), + ATH9K_HW_CAP_RTT = BIT(14), +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + ATH9K_HW_CAP_MCI = BIT(15), +#else + ATH9K_HW_CAP_MCI = 0, +#endif + ATH9K_HW_CAP_DFS = BIT(16), }; struct ath9k_hw_capabilities { @@ -268,6 +280,7 @@ enum ath9k_int { ATH9K_INT_TX = 0x00000040, ATH9K_INT_TXDESC = 0x00000080, ATH9K_INT_TIM_TIMER = 0x00000100, + ATH9K_INT_MCI = 0x00000200, ATH9K_INT_BB_WATCHDOG = 0x00000400, ATH9K_INT_TXURN = 0x00000800, ATH9K_INT_MIB = 0x00001000, @@ -419,6 +432,161 @@ enum ath9k_rx_qtype { ATH9K_RX_QUEUE_MAX, }; +enum mci_message_header { /* length of payload */ + MCI_LNA_CTRL = 0x10, /* len = 0 */ + MCI_CONT_NACK = 0x20, /* len = 0 */ + MCI_CONT_INFO = 0x30, /* len = 4 */ + MCI_CONT_RST = 0x40, /* len = 0 */ + 
MCI_SCHD_INFO = 0x50, /* len = 16 */ + MCI_CPU_INT = 0x60, /* len = 4 */ + MCI_SYS_WAKING = 0x70, /* len = 0 */ + MCI_GPM = 0x80, /* len = 16 */ + MCI_LNA_INFO = 0x90, /* len = 1 */ + MCI_LNA_STATE = 0x94, + MCI_LNA_TAKE = 0x98, + MCI_LNA_TRANS = 0x9c, + MCI_SYS_SLEEPING = 0xa0, /* len = 0 */ + MCI_REQ_WAKE = 0xc0, /* len = 0 */ + MCI_DEBUG_16 = 0xfe, /* len = 2 */ + MCI_REMOTE_RESET = 0xff /* len = 16 */ +}; + +enum ath_mci_gpm_coex_profile_type { + MCI_GPM_COEX_PROFILE_UNKNOWN, + MCI_GPM_COEX_PROFILE_RFCOMM, + MCI_GPM_COEX_PROFILE_A2DP, + MCI_GPM_COEX_PROFILE_HID, + MCI_GPM_COEX_PROFILE_BNEP, + MCI_GPM_COEX_PROFILE_VOICE, + MCI_GPM_COEX_PROFILE_MAX +}; + +/* MCI GPM/Coex opcode/type definitions */ +enum { + MCI_GPM_COEX_W_GPM_PAYLOAD = 1, + MCI_GPM_COEX_B_GPM_TYPE = 4, + MCI_GPM_COEX_B_GPM_OPCODE = 5, + /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */ + MCI_GPM_WLAN_CAL_W_SEQUENCE = 2, + + /* MCI_GPM_COEX_VERSION_QUERY */ + /* MCI_GPM_COEX_VERSION_RESPONSE */ + MCI_GPM_COEX_B_MAJOR_VERSION = 6, + MCI_GPM_COEX_B_MINOR_VERSION = 7, + /* MCI_GPM_COEX_STATUS_QUERY */ + MCI_GPM_COEX_B_BT_BITMAP = 6, + MCI_GPM_COEX_B_WLAN_BITMAP = 7, + /* MCI_GPM_COEX_HALT_BT_GPM */ + MCI_GPM_COEX_B_HALT_STATE = 6, + /* MCI_GPM_COEX_WLAN_CHANNELS */ + MCI_GPM_COEX_B_CHANNEL_MAP = 6, + /* MCI_GPM_COEX_BT_PROFILE_INFO */ + MCI_GPM_COEX_B_PROFILE_TYPE = 6, + MCI_GPM_COEX_B_PROFILE_LINKID = 7, + MCI_GPM_COEX_B_PROFILE_STATE = 8, + MCI_GPM_COEX_B_PROFILE_ROLE = 9, + MCI_GPM_COEX_B_PROFILE_RATE = 10, + MCI_GPM_COEX_B_PROFILE_VOTYPE = 11, + MCI_GPM_COEX_H_PROFILE_T = 12, + MCI_GPM_COEX_B_PROFILE_W = 14, + MCI_GPM_COEX_B_PROFILE_A = 15, + /* MCI_GPM_COEX_BT_STATUS_UPDATE */ + MCI_GPM_COEX_B_STATUS_TYPE = 6, + MCI_GPM_COEX_B_STATUS_LINKID = 7, + MCI_GPM_COEX_B_STATUS_STATE = 8, + /* MCI_GPM_COEX_BT_UPDATE_FLAGS */ + MCI_GPM_COEX_W_BT_FLAGS = 6, + MCI_GPM_COEX_B_BT_FLAGS_OP = 10 +}; + +enum mci_gpm_subtype { + MCI_GPM_BT_CAL_REQ = 0, + MCI_GPM_BT_CAL_GRANT = 1, + MCI_GPM_BT_CAL_DONE = 2, + MCI_GPM_WLAN_CAL_REQ = 3, + MCI_GPM_WLAN_CAL_GRANT = 4, + MCI_GPM_WLAN_CAL_DONE = 5, + MCI_GPM_COEX_AGENT = 0x0c, + MCI_GPM_RSVD_PATTERN = 0xfe, + MCI_GPM_RSVD_PATTERN32 = 0xfefefefe, + MCI_GPM_BT_DEBUG = 0xff +}; + +enum mci_bt_state { + MCI_BT_SLEEP, + MCI_BT_AWAKE, + MCI_BT_CAL_START, + MCI_BT_CAL +}; + +/* Type of state query */ +enum mci_state_type { + MCI_STATE_ENABLE, + MCI_STATE_INIT_GPM_OFFSET, + MCI_STATE_NEXT_GPM_OFFSET, + MCI_STATE_LAST_GPM_OFFSET, + MCI_STATE_BT, + MCI_STATE_SET_BT_SLEEP, + MCI_STATE_SET_BT_AWAKE, + MCI_STATE_SET_BT_CAL_START, + MCI_STATE_SET_BT_CAL, + MCI_STATE_LAST_SCHD_MSG_OFFSET, + MCI_STATE_REMOTE_SLEEP, + MCI_STATE_CONT_RSSI_POWER, + MCI_STATE_CONT_PRIORITY, + MCI_STATE_CONT_TXRX, + MCI_STATE_RESET_REQ_WAKE, + MCI_STATE_SEND_WLAN_COEX_VERSION, + MCI_STATE_SET_BT_COEX_VERSION, + MCI_STATE_SEND_WLAN_CHANNELS, + MCI_STATE_SEND_VERSION_QUERY, + MCI_STATE_SEND_STATUS_QUERY, + MCI_STATE_NEED_FLUSH_BT_INFO, + MCI_STATE_SET_CONCUR_TX_PRI, + MCI_STATE_RECOVER_RX, + MCI_STATE_NEED_FTP_STOMP, + MCI_STATE_NEED_TUNING, + MCI_STATE_DEBUG, + MCI_STATE_MAX +}; + +enum mci_gpm_coex_opcode { + MCI_GPM_COEX_VERSION_QUERY, + MCI_GPM_COEX_VERSION_RESPONSE, + MCI_GPM_COEX_STATUS_QUERY, + MCI_GPM_COEX_HALT_BT_GPM, + MCI_GPM_COEX_WLAN_CHANNELS, + MCI_GPM_COEX_BT_PROFILE_INFO, + MCI_GPM_COEX_BT_STATUS_UPDATE, + MCI_GPM_COEX_BT_UPDATE_FLAGS +}; + +#define MCI_GPM_NOMORE 0 +#define MCI_GPM_MORE 1 +#define MCI_GPM_INVALID 0xffffffff + +#define MCI_GPM_RECYCLE(_p_gpm) do { \ + *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = 
\ + MCI_GPM_RSVD_PATTERN32; \ +} while (0) + +#define MCI_GPM_TYPE(_p_gpm) \ + (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff) + +#define MCI_GPM_OPCODE(_p_gpm) \ + (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff) + +#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \ + *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\ +} while (0) + +#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \ + *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \ + *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\ +} while (0) + +#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE) + struct ath9k_beacon_state { u32 bs_nexttbtt; u32 bs_nextdtim; @@ -791,8 +959,6 @@ struct ath_hw { /* Bluetooth coexistance */ struct ath_btcoex_hw btcoex_hw; - u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS]; - u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; u32 intr_txqs; u8 txchainmask; @@ -850,7 +1016,7 @@ struct ath_hw { u32 ts_paddr_start; u32 ts_paddr_end; u16 ts_tail; - u8 ts_size; + u16 ts_size; u32 bb_watchdog_last_status; u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ @@ -948,7 +1114,6 @@ bool ath9k_hw_disable(struct ath_hw *ah); void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test); void ath9k_hw_setopmode(struct ath_hw *ah); void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); -void ath9k_hw_setbssidmask(struct ath_hw *ah); void ath9k_hw_write_associd(struct ath_hw *ah); u32 ath9k_hw_gettsf32(struct ath_hw *ah); u64 ath9k_hw_gettsf64(struct ath_hw *ah); @@ -1041,6 +1206,42 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning); void ath9k_hw_proc_mib_event(struct ath_hw *ah); void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan); +bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, + u32 *payload, u8 len, bool wait_done, + bool check_bt); +void ar9003_mci_mute_bt(struct ath_hw *ah); +u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data); +void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, + u16 len, u32 sched_addr); +void ar9003_mci_cleanup(struct ath_hw *ah); +void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, + bool wait_done); +u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, + u8 gpm_opcode, int time_out); +void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g); +void ar9003_mci_disable_interrupt(struct ath_hw *ah); +void ar9003_mci_enable_interrupt(struct ath_hw *ah); +void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done); +void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, + bool is_full_sleep); +bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints); +void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done); +void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done); +void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done); +void ar9003_mci_sync_bt_state(struct ath_hw *ah); +void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, + u32 *rx_msg_intr); + +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT +static inline enum ath_btcoex_scheme +ath9k_hw_get_btcoex_scheme(struct ath_hw *ah) +{ + return ah->btcoex_hw.scheme; +} +#else +#define ath9k_hw_get_btcoex_scheme(...) 
ATH_BTCOEX_CFG_NONE +#endif + #define ATH9K_CLOCK_RATE_CCK 22 #define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 #define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d4c909f8e47..abf943557de 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -258,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc, if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) max_streams = 1; + else if (AR_SREV_9462(ah)) + max_streams = 2; else if (AR_SREV_9300_20_OR_LATER(ah)) max_streams = 3; else @@ -274,8 +276,7 @@ static void setup_ht_cap(struct ath_softc *sc, tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams); rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams); - ath_dbg(common, ATH_DBG_CONFIG, - "TX streams %d, RX streams: %d\n", + ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", tx_streams, rx_streams); if (tx_streams != rx_streams) { @@ -295,9 +296,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy, { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath_softc *sc = hw->priv; - struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); + struct ath_hw *ah = sc->sc_ah; + struct ath_regulatory *reg = ath9k_hw_regulatory(ah); + int ret; + + ret = ath_reg_notifier_apply(wiphy, request, reg); + + /* Set tx power */ + if (ah->curchan) { + sc->config.txpowlimit = 2 * ah->curchan->chan->max_power; + ath9k_ps_wakeup(sc); + ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false); + sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; + ath9k_ps_restore(sc); + } - return ath_reg_notifier_apply(wiphy, request, reg); + return ret; } /* @@ -314,7 +328,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, struct ath_buf *bf; int i, bsize, error, desc_len; - ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", + ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n", name, nbuf, ndesc); INIT_LIST_HEAD(head); @@ -360,7 +374,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, goto fail; } ds = (u8 *) dd->dd_desc; - ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", + ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", name, ds, (u32) dd->dd_desc_len, ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); @@ -408,9 +422,10 @@ fail: static int ath9k_init_btcoex(struct ath_softc *sc) { struct ath_txq *txq; + struct ath_hw *ah = sc->sc_ah; int r; - switch (sc->sc_ah->btcoex_hw.scheme) { + switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) { case ATH_BTCOEX_CFG_NONE: break; case ATH_BTCOEX_CFG_2WIRE: @@ -425,6 +440,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc) ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; break; + case ATH_BTCOEX_CFG_MCI: + sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; + sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; + INIT_LIST_HEAD(&sc->btcoex.mci.info); + + r = ath_mci_setup(sc); + if (r) + return r; + + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) { + ah->btcoex_hw.mci.ready = false; + ah->btcoex_hw.mci.bt_state = 0; + ah->btcoex_hw.mci.bt_ver_major = 3; + ah->btcoex_hw.mci.bt_ver_minor = 0; + ah->btcoex_hw.mci.bt_version_known = false; + ah->btcoex_hw.mci.update_2g5g = true; + ah->btcoex_hw.mci.is_2g = true; + ah->btcoex_hw.mci.wlan_channels_update = false; + ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000; + ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff; + 
ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff; + ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff; + ah->btcoex_hw.mci.query_bt = true; + ah->btcoex_hw.mci.unhalt_bt_gpm = true; + ah->btcoex_hw.mci.halted_bt_gpm = false; + ah->btcoex_hw.mci.need_flush_btinfo = false; + ah->btcoex_hw.mci.wlan_cal_seq = 0; + ah->btcoex_hw.mci.wlan_cal_done = 0; + ah->btcoex_hw.mci.config = 0x2201; + } + break; default: WARN_ON(1); break; @@ -695,6 +741,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; hw->queues = 4; hw->max_rates = 4; @@ -833,9 +880,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc) kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels); if ((sc->btcoex.no_stomp_timer) && - sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE) ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); + if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI) + ath_mci_cleanup(sc); + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_cleanupq(sc, &sc->tx.txq[i]); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index ecdb6fd2907..fd3f19c2e55 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -21,7 +21,7 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, struct ath9k_tx_queue_info *qi) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT, + ath_dbg(ath9k_hw_common(ah), INTERRUPT, "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", ah->txok_interrupt_mask, ah->txerr_interrupt_mask, ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, @@ -57,8 +57,7 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf); void ath9k_hw_txstart(struct ath_hw *ah, u32 q) { - ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE, - "Enable TXE on queue: %u\n", q); + ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q); REG_WRITE(ah, AR_Q_TXE, 1 << q); } EXPORT_SYMBOL(ath9k_hw_txstart); @@ -202,12 +201,12 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, ATH_DBG_QUEUE, + ath_dbg(common, QUEUE, "Set TXQ properties, inactive queue: %u\n", q); return false; } - ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); + ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q); qi->tqi_ver = qinfo->tqi_ver; qi->tqi_subtype = qinfo->tqi_subtype; @@ -266,7 +265,7 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, ATH_DBG_QUEUE, + ath_dbg(common, QUEUE, "Get TXQ properties, inactive queue: %u\n", q); return false; } @@ -325,7 +324,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, return -1; } - ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); + ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q); qi = &ah->txq[q]; if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { @@ -348,12 +347,11 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, ATH_DBG_QUEUE, - "Release TXQ, inactive queue: %u\n", q); + ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q); return false; } - ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); + ath_dbg(common, QUEUE, "Release TX queue: %u\n", q); 
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; ah->txok_interrupt_mask &= ~(1 << q); @@ -376,12 +374,11 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, ATH_DBG_QUEUE, - "Reset TXQ, inactive queue: %u\n", q); + ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q); return true; } - ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); + ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q); if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { if (chan && IS_CHAN_B(chan)) @@ -760,7 +757,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah) return true; host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); - if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) + + if (((host_isr & AR_INTR_MAC_IRQ) || + (host_isr & AR_INTR_ASYNC_MASK_MCI)) && + (host_isr != AR_INTR_SPURIOUS)) return true; host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); @@ -781,7 +781,7 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) else atomic_dec(&ah->intr_ref_cnt); - ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n"); + ath_dbg(common, INTERRUPT, "disable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); if (!AR_SREV_9100(ah)) { @@ -798,13 +798,13 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 sync_default = AR_INTR_SYNC_DEFAULT; + u32 async_mask; if (!(ah->imask & ATH9K_INT_GLOBAL)) return; if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { - ath_dbg(common, ATH_DBG_INTERRUPT, - "Do not enable IER ref count %d\n", + ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", atomic_read(&ah->intr_ref_cnt)); return; } @@ -812,18 +812,21 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) if (AR_SREV_9340(ah)) sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; - ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n"); + async_mask = AR_INTR_MAC_IRQ; + + if (ah->imask & ATH9K_INT_MCI) + async_mask |= AR_INTR_ASYNC_MASK_MCI; + + ath_dbg(common, INTERRUPT, "enable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_ENABLE); if (!AR_SREV_9100(ah)) { - REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, - AR_INTR_MAC_IRQ); - REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); - + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask); + REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask); REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default); } - ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); } EXPORT_SYMBOL(ath9k_hw_enable_interrupts); @@ -838,7 +841,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) if (!(ints & ATH9K_INT_GLOBAL)) ath9k_hw_disable_interrupts(ah); - ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints); + ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints); mask = ints & ATH9K_INT_COMMON; mask2 = 0; @@ -901,7 +904,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) mask2 |= AR_IMR_S2_CST; } - ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); + ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask); REG_WRITE(ah, AR_IMR, mask); ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC | AR_IMR_S2_CABEND | AR_IMR_S2_CABTO | diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a9c5ae75277..e267c92dbfb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -118,7 +118,7 @@ void ath9k_ps_restore(struct 
ath_softc *sc) if (--sc->ps_usecount != 0) goto unlock; - if (sc->ps_idle) + if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK)) mode = ATH9K_PM_FULL_SLEEP; else if (sc->ps_enabled && !(sc->ps_flags & (PS_WAIT_FOR_BEACON | @@ -332,14 +332,14 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan, hchan = ah->curchan; } - if (fastcc && !ath9k_hw_check_alive(ah)) + if (fastcc && (ah->chip_fullsleep || + !ath9k_hw_check_alive(ah))) fastcc = false; if (!ath_prepare_reset(sc, retry_tx, flush)) fastcc = false; - ath_dbg(common, ATH_DBG_CONFIG, - "Reset to %u MHz, HT40: %d fastcc: %d\n", + ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS | CHANNEL_HT40PLUS)), fastcc); @@ -428,7 +428,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int txctl.paprd = BIT(chain); if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n"); + ath_dbg(common, CALIBRATE, "PAPRD TX failed\n"); dev_kfree_skb_any(skb); return false; } @@ -437,7 +437,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); if (!time_left) - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Timeout waiting for paprd training on TX chain %d\n", chain); @@ -486,27 +486,27 @@ void ath_paprd_calibrate(struct work_struct *work) chain_ok = 0; - ath_dbg(common, ATH_DBG_CALIBRATE, - "Sending PAPRD frame for thermal measurement " - "on chain %d\n", chain); + ath_dbg(common, CALIBRATE, + "Sending PAPRD frame for thermal measurement on chain %d\n", + chain); if (!ath_paprd_send_frame(sc, skb, chain)) goto fail_paprd; ar9003_paprd_setup_gain_table(ah, chain); - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "Sending PAPRD training frame on chain %d\n", chain); if (!ath_paprd_send_frame(sc, skb, chain)) goto fail_paprd; if (!ar9003_paprd_is_done(ah)) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "PAPRD not yet done on chain %d\n", chain); break; } if (ar9003_paprd_create_curve(ah, caldata, chain)) { - ath_dbg(common, ATH_DBG_CALIBRATE, + ath_dbg(common, CALIBRATE, "PAPRD create curve failed on chain %d\n", chain); break; @@ -561,7 +561,6 @@ void ath_ani_calibrate(unsigned long data) /* Long calibration runs independently of short calibration. */ if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) { longcal = true; - ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); common->ani.longcal_timer = timestamp; } @@ -569,8 +568,6 @@ void ath_ani_calibrate(unsigned long data) if (!common->ani.caldone) { if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; - ath_dbg(common, ATH_DBG_ANI, - "shortcal @%lu\n", jiffies); common->ani.shortcal_timer = timestamp; common->ani.resetcal_timer = timestamp; } @@ -584,8 +581,9 @@ void ath_ani_calibrate(unsigned long data) } /* Verify whether we must check ANI */ - if ((timestamp - common->ani.checkani_timer) >= - ah->config.ani_poll_interval) { + if (sc->sc_ah->config.enable_ani + && (timestamp - common->ani.checkani_timer) >= + ah->config.ani_poll_interval) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -605,6 +603,12 @@ void ath_ani_calibrate(unsigned long data) ah->rxchainmask, longcal); } + ath_dbg(common, ANI, + "Calibration @%lu finished: %s %s %s, caldone: %s\n", + jiffies, + longcal ? "long" : "", shortcal ? "short" : "", + aniflag ? 
"ani" : "", common->ani.caldone ? "true" : "false"); + ath9k_ps_restore(sc); set_timer: @@ -630,7 +634,8 @@ set_timer: } } -static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) +static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) { struct ath_node *an; an = (struct ath_node *)sta->drv_priv; @@ -639,8 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) spin_lock(&sc->nodes_lock); list_add(&an->list, &sc->nodes); spin_unlock(&sc->nodes_lock); - an->sta = sta; #endif + an->sta = sta; + an->vif = vif; if (sc->sc_flags & SC_OP_TXAGGR) { ath_tx_node_init(sc, an); an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + @@ -709,8 +715,7 @@ void ath9k_tasklet(unsigned long data) * TSF sync does not look correct; remain awake to sync with * the next Beacon. */ - ath_dbg(common, ATH_DBG_PS, - "TSFOOR - Sync with next Beacon\n"); + ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; } @@ -736,10 +741,13 @@ void ath9k_tasklet(unsigned long data) ath_tx_tasklet(sc); } - if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) if (status & ATH9K_INT_GENTIMER) ath_gen_timer_isr(sc->sc_ah); + if ((status & ATH9K_INT_MCI) && ATH9K_HW_CAP_MCI) + ath_mci_intr(sc); + out: /* re-enable hardware interrupt */ ath9k_hw_enable_interrupts(ah); @@ -762,7 +770,8 @@ irqreturn_t ath_isr(int irq, void *dev) ATH9K_INT_BMISS | \ ATH9K_INT_CST | \ ATH9K_INT_TSFOOR | \ - ATH9K_INT_GENTIMER) + ATH9K_INT_GENTIMER | \ + ATH9K_INT_MCI) struct ath_softc *sc = dev; struct ath_hw *ah = sc->sc_ah; @@ -880,82 +889,6 @@ chip_reset: #undef SCHED_INTR } -static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ieee80211_channel *channel = hw->conf.channel; - int r; - - ath9k_ps_wakeup(sc); - spin_lock_bh(&sc->sc_pcu_lock); - atomic_set(&ah->intr_ref_cnt, -1); - - ath9k_hw_configpcipowersave(ah, false); - - if (!ah->curchan) - ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah); - - r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); - if (r) { - ath_err(common, - "Unable to reset channel (%u MHz), reset status %d\n", - channel->center_freq, r); - } - - ath_complete_reset(sc, true); - - /* Enable LED */ - ath9k_hw_cfg_output(ah, ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - ath9k_hw_set_gpio(ah, ah->led_pin, 0); - - spin_unlock_bh(&sc->sc_pcu_lock); - - ath9k_ps_restore(sc); -} - -void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) -{ - struct ath_hw *ah = sc->sc_ah; - struct ieee80211_channel *channel = hw->conf.channel; - int r; - - ath9k_ps_wakeup(sc); - - ath_cancel_work(sc); - - spin_lock_bh(&sc->sc_pcu_lock); - - /* - * Keep the LED on when the radio is disabled - * during idle unassociated state. 
- */ - if (!sc->ps_idle) { - ath9k_hw_set_gpio(ah, ah->led_pin, 1); - ath9k_hw_cfg_gpio_input(ah, ah->led_pin); - } - - ath_prepare_reset(sc, false, true); - - if (!ah->curchan) - ah->curchan = ath9k_cmn_get_curchannel(hw, ah); - - r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); - if (r) { - ath_err(ath9k_hw_common(sc->sc_ah), - "Unable to reset channel (%u MHz), reset status %d\n", - channel->center_freq, r); - } - - ath9k_hw_phy_disable(ah); - - ath9k_hw_configpcipowersave(ah, true); - - spin_unlock_bh(&sc->sc_pcu_lock); - ath9k_ps_restore(sc); -} - static int ath_reset(struct ath_softc *sc, bool retry_tx) { int r; @@ -1002,8 +935,8 @@ void ath_hw_check(struct work_struct *work) busy = ath_update_survey_stats(sc); spin_unlock_irqrestore(&common->cc_lock, flags); - ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, " - "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1); + ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n", + busy, sc->hw_busy_count + 1); if (busy >= 99) { if (++sc->hw_busy_count >= 3) { RESET_STAT_INC(sc, RESET_TYPE_BB_HANG); @@ -1026,8 +959,7 @@ static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum) count++; if (count == 3) { /* Rx is hung for more than 500ms. Reset it */ - ath_dbg(common, ATH_DBG_RESET, - "Possible RX hang, resetting"); + ath_dbg(common, RESET, "Possible RX hang, resetting\n"); RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); count = 0; @@ -1067,7 +999,7 @@ static int ath9k_start(struct ieee80211_hw *hw) struct ath9k_channel *init_channel; int r; - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Starting driver with initial channel: %d MHz\n", curchan->center_freq); @@ -1091,6 +1023,9 @@ static int ath9k_start(struct ieee80211_hw *hw) * and then setup of the interrupt mask. */ spin_lock_bh(&sc->sc_pcu_lock); + + atomic_set(&ah->intr_ref_cnt, -1); + r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (r) { ath_err(common, @@ -1117,6 +1052,9 @@ static int ath9k_start(struct ieee80211_hw *hw) if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) ah->imask |= ATH9K_INT_CST; + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + ah->imask |= ATH9K_INT_MCI; + sc->sc_flags &= ~SC_OP_INVALID; sc->sc_ah->is_monitoring = false; @@ -1129,15 +1067,28 @@ static int ath9k_start(struct ieee80211_hw *hw) goto mutex_unlock; } + if (ah->led_pin >= 0) { + ath9k_hw_cfg_output(ah, ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(ah, ah->led_pin, 0); + } + + /* + * Reset key cache to sane defaults (all entries cleared) instead of + * semi-random values after suspend/resume. 
+ */ + ath9k_cmn_init_crypto(sc->sc_ah); + spin_unlock_bh(&sc->sc_pcu_lock); - if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) && + if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) && !ah->btcoex_hw.enabled) { - ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, - AR_STOMP_LOW_WLAN_WGHT); + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_LOW_WLAN_WGHT); ath9k_hw_btcoex_enable(ah); - if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) ath9k_btcoex_timer_resume(sc); } @@ -1167,12 +1118,19 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (ieee80211_is_data(hdr->frame_control) && !ieee80211_is_nullfunc(hdr->frame_control) && !ieee80211_has_pm(hdr->frame_control)) { - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "Add PM=1 for a TX frame while in PS mode\n"); hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); } } + /* + * Cannot tx while the hardware is in full sleep, it first needs a full + * chip reset to recover from that + */ + if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) + goto exit; + if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { /* * We are using PS-Poll and mac80211 can request TX while in @@ -1183,12 +1141,11 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) ath9k_hw_setrxabort(sc->sc_ah, 0); if (ieee80211_is_pspoll(hdr->frame_control)) { - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "Sending PS-Poll to pick a buffered frame\n"); sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA; } else { - ath_dbg(common, ATH_DBG_PS, - "Wake up to complete TX\n"); + ath_dbg(common, PS, "Wake up to complete TX\n"); sc->ps_flags |= PS_WAIT_FOR_TX_ACK; } /* @@ -1202,10 +1159,10 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) memset(&txctl, 0, sizeof(struct ath_tx_control)); txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; - ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); + ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, ATH_DBG_XMIT, "TX failed\n"); + ath_dbg(common, XMIT, "TX failed\n"); goto exit; } @@ -1219,13 +1176,14 @@ static void ath9k_stop(struct ieee80211_hw *hw) struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); + bool prev_idle; mutex_lock(&sc->mutex); ath_cancel_work(sc); if (sc->sc_flags & SC_OP_INVALID) { - ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); + ath_dbg(common, ANY, "Device not present\n"); mutex_unlock(&sc->mutex); return; } @@ -1233,10 +1191,12 @@ static void ath9k_stop(struct ieee80211_hw *hw) /* Ensure HW is awake when we try to shut it down. */ ath9k_ps_wakeup(sc); - if (ah->btcoex_hw.enabled) { + if (ah->btcoex_hw.enabled && + ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { ath9k_hw_btcoex_disable(ah); - if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) ath9k_btcoex_timer_pause(sc); + ath_mci_flush_profile(&sc->btcoex.mci); } spin_lock_bh(&sc->sc_pcu_lock); @@ -1248,39 +1208,49 @@ static void ath9k_stop(struct ieee80211_hw *hw) * before setting the invalid flag. 
*/ ath9k_hw_disable_interrupts(ah); - if (!(sc->sc_flags & SC_OP_INVALID)) { - ath_drain_all_txq(sc, false); - ath_stoprecv(sc); - ath9k_hw_phy_disable(ah); - } else - sc->rx.rxlink = NULL; + spin_unlock_bh(&sc->sc_pcu_lock); + + /* we can now sync irq and kill any running tasklets, since we already + * disabled interrupts and not holding a spin lock */ + synchronize_irq(sc->irq); + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); + + prev_idle = sc->ps_idle; + sc->ps_idle = true; + + spin_lock_bh(&sc->sc_pcu_lock); + + if (ah->led_pin >= 0) { + ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + } + + ath_prepare_reset(sc, false, true); if (sc->rx.frag) { dev_kfree_skb_any(sc->rx.frag); sc->rx.frag = NULL; } - /* disable HAL and put h/w to sleep */ - ath9k_hw_disable(ah); + if (!ah->curchan) + ah->curchan = ath9k_cmn_get_curchannel(hw, ah); + + ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); + ath9k_hw_phy_disable(ah); - spin_unlock_bh(&sc->sc_pcu_lock); + ath9k_hw_configpcipowersave(ah, true); - /* we can now sync irq and kill any running tasklets, since we already - * disabled interrupts and not holding a spin lock */ - synchronize_irq(sc->irq); - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); + spin_unlock_bh(&sc->sc_pcu_lock); ath9k_ps_restore(sc); - sc->ps_idle = true; - ath_radio_disable(sc, hw); - sc->sc_flags |= SC_OP_INVALID; + sc->ps_idle = prev_idle; mutex_unlock(&sc->mutex); - ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); + ath_dbg(common, CONFIG, "Driver halt\n"); } bool ath9k_uses_beacons(int type) @@ -1495,8 +1465,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, goto out; } - ath_dbg(common, ATH_DBG_CONFIG, - "Attach a VIF of type: %d\n", vif->type); + ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); sc->nvifs++; @@ -1516,7 +1485,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); int ret = 0; - ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); + ath_dbg(common, CONFIG, "Change Interface\n"); mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); @@ -1559,7 +1528,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); + ath_dbg(common, CONFIG, "Detach Interface\n"); ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); @@ -1616,8 +1585,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &hw->conf; - bool disable_radio = false; + ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); /* @@ -1628,13 +1597,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) */ if (changed & IEEE80211_CONF_CHANGE_IDLE) { sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); - if (!sc->ps_idle) { - ath_radio_enable(sc, hw); - ath_dbg(common, ATH_DBG_CONFIG, - "not-idle: enabling radio\n"); - } else { - disable_radio = true; - } + if (sc->ps_idle) + ath_cancel_work(sc); } /* @@ -1655,12 +1619,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (conf->flags & IEEE80211_CONF_MONITOR) { - ath_dbg(common, ATH_DBG_CONFIG, - "Monitor mode is enabled\n"); + ath_dbg(common, CONFIG, "Monitor mode is enabled\n"); sc->sc_ah->is_monitoring = true; } else { - ath_dbg(common, ATH_DBG_CONFIG, - "Monitor mode is 
disabled\n"); + ath_dbg(common, CONFIG, "Monitor mode is disabled\n"); sc->sc_ah->is_monitoring = false; } } @@ -1680,8 +1642,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) else sc->sc_flags &= ~SC_OP_OFFCHANNEL; - ath_dbg(common, ATH_DBG_CONFIG, - "Set channel: %d MHz type: %d\n", + ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", curchan->center_freq, conf->channel_type); /* update survey stats for the old channel before switching */ @@ -1738,21 +1699,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath_dbg(common, ATH_DBG_CONFIG, - "Set power: %d\n", conf->power_level); + ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level); sc->config.txpowlimit = 2 * conf->power_level; - ath9k_ps_wakeup(sc); ath9k_cmn_update_txpow(ah, sc->curtxpow, sc->config.txpowlimit, &sc->curtxpow); - ath9k_ps_restore(sc); - } - - if (disable_radio) { - ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); - ath_radio_disable(sc, hw); } mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc); return 0; } @@ -1785,8 +1739,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, ath9k_hw_setrxfilter(sc->sc_ah, rfilt); ath9k_ps_restore(sc); - ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, - "Set HW RX filter: 0x%x\n", rfilt); + ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n", + rfilt); } static int ath9k_sta_add(struct ieee80211_hw *hw, @@ -1798,7 +1752,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; - ath_node_attach(sc, sta); + ath_node_attach(sc, sta, vif); if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_AP_VLAN) @@ -1883,7 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, qi.tqi_cwmax = params->cw_max; qi.tqi_burstTime = params->txop; - ath_dbg(common, ATH_DBG_CONFIG, + ath_dbg(common, CONFIG, "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", queue, txq->axq_qnum, params->aifs, params->cw_min, params->cw_max, params->txop); @@ -1915,7 +1869,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw, if (ath9k_modparam_nohwcrypt) return -ENOSPC; - if (vif->type == NL80211_IFTYPE_ADHOC && + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && (key->cipher == WLAN_CIPHER_SUITE_TKIP || key->cipher == WLAN_CIPHER_SUITE_CCMP) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { @@ -1931,7 +1886,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); - ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n"); + ath_dbg(common, CONFIG, "Set HW Key\n"); switch (cmd) { case SET_KEY: @@ -1983,9 +1938,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); common->curaid = bss_conf->aid; ath9k_hw_write_associd(sc->sc_ah); - ath_dbg(common, ATH_DBG_CONFIG, - "Bss Info ASSOC %d, bssid: %pM\n", - bss_conf->aid, common->curbssid); + ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n", + bss_conf->aid, common->curbssid); ath_beacon_config(sc, vif); /* * Request a re-configuration of Beacon related timers @@ -2016,8 +1970,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif) /* Reconfigure bss info */ if (avp->primary_sta_vif && !bss_conf->assoc) { - ath_dbg(common, ATH_DBG_CONFIG, - "Bss Info DISASSOC %d, bssid %pM\n", + ath_dbg(common, CONFIG, "Bss Info 
DISASSOC %d, bssid %pM\n", common->curaid, common->curbssid); sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS); avp->primary_sta_vif = false; @@ -2059,7 +2012,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID) { ath9k_config_bss(sc, vif); - ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n", + ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n", common->curbssid, common->curaid); } @@ -2137,7 +2090,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_ERP_PREAMBLE) { - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", + ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n", bss_conf->use_short_preamble); if (bss_conf->use_short_preamble) sc->sc_flags |= SC_OP_PREAMBLE_SHORT; @@ -2146,7 +2099,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_ERP_CTS_PROT) { - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", + ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n", bss_conf->use_cts_prot); if (bss_conf->use_cts_prot && hw->conf.channel->band != IEEE80211_BAND_5GHZ) @@ -2312,20 +2265,17 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) cancel_delayed_work_sync(&sc->tx_complete_work); if (ah->ah_flags & AH_UNPLUGGED) { - ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); + ath_dbg(common, ANY, "Device has been unplugged!\n"); mutex_unlock(&sc->mutex); return; } if (sc->sc_flags & SC_OP_INVALID) { - ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); + ath_dbg(common, ANY, "Device not present\n"); mutex_unlock(&sc->mutex); return; } - if (drop) - timeout = 1; - for (j = 0; j < timeout; j++) { bool npend = false; @@ -2343,21 +2293,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) } if (!npend) - goto out; + break; } - ath9k_ps_wakeup(sc); - spin_lock_bh(&sc->sc_pcu_lock); - drain_txq = ath_drain_all_txq(sc, false); - spin_unlock_bh(&sc->sc_pcu_lock); + if (drop) { + ath9k_ps_wakeup(sc); + spin_lock_bh(&sc->sc_pcu_lock); + drain_txq = ath_drain_all_txq(sc, false); + spin_unlock_bh(&sc->sc_pcu_lock); - if (!drain_txq) - ath_reset(sc, false); + if (!drain_txq) + ath_reset(sc, false); - ath9k_ps_restore(sc); - ieee80211_wake_queues(hw); + ath9k_ps_restore(sc); + ieee80211_wake_queues(hw); + } -out: ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); mutex_unlock(&sc->mutex); } diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c new file mode 100644 index 00000000000..05c23ea4c63 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/mci.c @@ -0,0 +1,668 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/dma-mapping.h> +#include <linux/slab.h> + +#include "ath9k.h" +#include "mci.h" + +static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 }; + +static struct ath_mci_profile_info* +ath_mci_find_profile(struct ath_mci_profile *mci, + struct ath_mci_profile_info *info) +{ + struct ath_mci_profile_info *entry; + + list_for_each_entry(entry, &mci->info, list) { + if (entry->conn_handle == info->conn_handle) + break; + } + return entry; +} + +static bool ath_mci_add_profile(struct ath_common *common, + struct ath_mci_profile *mci, + struct ath_mci_profile_info *info) +{ + struct ath_mci_profile_info *entry; + + if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) && + (info->type == MCI_GPM_COEX_PROFILE_VOICE)) { + ath_dbg(common, MCI, + "Too many SCO profile, failed to add new profile\n"); + return false; + } + + if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) && + (info->type != MCI_GPM_COEX_PROFILE_VOICE)) { + ath_dbg(common, MCI, + "Too many ACL profile, failed to add new profile\n"); + return false; + } + + entry = ath_mci_find_profile(mci, info); + + if (entry) + memcpy(entry, info, 10); + else { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return false; + + memcpy(entry, info, 10); + INC_PROF(mci, info); + list_add_tail(&info->list, &mci->info); + } + return true; +} + +static void ath_mci_del_profile(struct ath_common *common, + struct ath_mci_profile *mci, + struct ath_mci_profile_info *info) +{ + struct ath_mci_profile_info *entry; + + entry = ath_mci_find_profile(mci, info); + + if (!entry) { + ath_dbg(common, MCI, "Profile to be deleted not found\n"); + return; + } + DEC_PROF(mci, entry); + list_del(&entry->list); + kfree(entry); +} + +void ath_mci_flush_profile(struct ath_mci_profile *mci) +{ + struct ath_mci_profile_info *info, *tinfo; + + list_for_each_entry_safe(info, tinfo, &mci->info, list) { + list_del(&info->list); + DEC_PROF(mci, info); + kfree(info); + } + mci->aggr_limit = 0; +} + +static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) +{ + struct ath_mci_profile *mci = &btcoex->mci; + u32 wlan_airtime = btcoex->btcoex_period * + (100 - btcoex->duty_cycle) / 100; + + /* + * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms. + * When wlan_airtime is less than 4ms, aggregation limit has to be + * adjusted half of wlan_airtime to ensure that the aggregation can fit + * without collision with BT traffic. 
+ */ + if ((wlan_airtime <= 4) && + (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime)))) + mci->aggr_limit = 2 * wlan_airtime; +} + +static void ath_mci_update_scheme(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_btcoex *btcoex = &sc->btcoex; + struct ath_mci_profile *mci = &btcoex->mci; + struct ath_mci_profile_info *info; + u32 num_profile = NUM_PROF(mci); + + if (num_profile == 1) { + info = list_first_entry(&mci->info, + struct ath_mci_profile_info, + list); + if (mci->num_sco && info->T == 12) { + mci->aggr_limit = 8; + ath_dbg(common, MCI, + "Single SCO, aggregation limit 2 ms\n"); + } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) && + !info->master) { + btcoex->btcoex_period = 60; + ath_dbg(common, MCI, + "Single slave PAN/FTP, bt period 60 ms\n"); + } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) && + (info->T > 0 && info->T < 50) && + (info->A > 1 || info->W > 1)) { + btcoex->duty_cycle = 30; + mci->aggr_limit = 8; + ath_dbg(common, MCI, + "Multiple attempt/timeout single HID " + "aggregation limit 2 ms dutycycle 30%%\n"); + } + } else if ((num_profile == 2) && (mci->num_hid == 2)) { + btcoex->duty_cycle = 30; + mci->aggr_limit = 8; + ath_dbg(common, MCI, + "Two HIDs aggregation limit 2 ms dutycycle 30%%\n"); + } else if (num_profile > 3) { + mci->aggr_limit = 6; + ath_dbg(common, MCI, + "Three or more profiles aggregation limit 1.5 ms\n"); + } + + if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) { + if (IS_CHAN_HT(sc->sc_ah->curchan)) + ath_mci_adjust_aggr_limit(btcoex); + else + btcoex->btcoex_period >>= 1; + } + + ath9k_hw_btcoex_disable(sc->sc_ah); + ath9k_btcoex_timer_pause(sc); + + if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) + return; + + btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0); + if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) + btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; + + btcoex->btcoex_period *= 1000; + btcoex->btcoex_no_stomp = btcoex->btcoex_period * + (100 - btcoex->duty_cycle) / 100; + + ath9k_hw_btcoex_enable(sc->sc_ah); + ath9k_btcoex_timer_resume(sc); +} + + +static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + u32 payload[4] = {0, 0, 0, 0}; + + switch (opcode) { + case MCI_GPM_BT_CAL_REQ: + + ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n"); + + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { + ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); + ieee80211_queue_work(sc->hw, &sc->hw_reset_work); + } else + ath_dbg(common, MCI, "MCI State mismatches: %d\n", + ar9003_mci_state(ah, MCI_STATE_BT, NULL)); + + break; + + case MCI_GPM_BT_CAL_DONE: + + ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n"); + + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL) + ath_dbg(common, MCI, "MCI error illegal!\n"); + else + ath_dbg(common, MCI, "MCI BT not in CAL state\n"); + + break; + + case MCI_GPM_BT_CAL_GRANT: + + ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n"); + + /* Send WLAN_CAL_DONE for now */ + ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n"); + MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); + ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload, + 16, false, true); + break; + + default: + ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n"); + break; + } +} + +static void ath_mci_process_profile(struct ath_softc *sc, + struct ath_mci_profile_info *info) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_btcoex 
*btcoex = &sc->btcoex; + struct ath_mci_profile *mci = &btcoex->mci; + + if (info->start) { + if (!ath_mci_add_profile(common, mci, info)) + return; + } else + ath_mci_del_profile(common, mci, info); + + btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; + mci->aggr_limit = mci->num_sco ? 6 : 0; + if (NUM_PROF(mci)) { + btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; + btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; + } else { + btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL : + ATH_BTCOEX_STOMP_LOW; + btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; + } + + ath_mci_update_scheme(sc); +} + +static void ath_mci_process_status(struct ath_softc *sc, + struct ath_mci_profile_status *status) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_btcoex *btcoex = &sc->btcoex; + struct ath_mci_profile *mci = &btcoex->mci; + struct ath_mci_profile_info info; + int i = 0, old_num_mgmt = mci->num_mgmt; + + /* Link status type are not handled */ + if (status->is_link) { + ath_dbg(common, MCI, "Skip link type status update\n"); + return; + } + + memset(&info, 0, sizeof(struct ath_mci_profile_info)); + + info.conn_handle = status->conn_handle; + if (ath_mci_find_profile(mci, &info)) { + ath_dbg(common, MCI, + "Skip non link state update for existing profile %d\n", + status->conn_handle); + return; + } + if (status->conn_handle >= ATH_MCI_MAX_PROFILE) { + ath_dbg(common, MCI, "Ignore too many non-link update\n"); + return; + } + if (status->is_critical) + __set_bit(status->conn_handle, mci->status); + else + __clear_bit(status->conn_handle, mci->status); + + mci->num_mgmt = 0; + do { + if (test_bit(i, mci->status)) + mci->num_mgmt++; + } while (++i < ATH_MCI_MAX_PROFILE); + + if (old_num_mgmt != mci->num_mgmt) + ath_mci_update_scheme(sc); +} + +static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_mci_profile_info profile_info; + struct ath_mci_profile_status profile_status; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + u32 version; + u8 major; + u8 minor; + u32 seq_num; + + switch (opcode) { + + case MCI_GPM_COEX_VERSION_QUERY: + ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n"); + version = ar9003_mci_state(ah, + MCI_STATE_SEND_WLAN_COEX_VERSION, NULL); + break; + + case MCI_GPM_COEX_VERSION_RESPONSE: + ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n"); + major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); + minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); + ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n", + major, minor); + version = (major << 8) + minor; + version = ar9003_mci_state(ah, + MCI_STATE_SET_BT_COEX_VERSION, &version); + break; + + case MCI_GPM_COEX_STATUS_QUERY: + ath_dbg(common, MCI, + "MCI Recv GPM COEX Status Query = 0x%02x\n", + *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP)); + ar9003_mci_state(ah, + MCI_STATE_SEND_WLAN_CHANNELS, NULL); + break; + + case MCI_GPM_COEX_BT_PROFILE_INFO: + ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n"); + memcpy(&profile_info, + (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10); + + if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) + || (profile_info.type >= + MCI_GPM_COEX_PROFILE_MAX)) { + + ath_dbg(common, MCI, + "illegal profile type = %d, state = %d\n", + profile_info.type, + profile_info.start); + break; + } + + ath_mci_process_profile(sc, &profile_info); + break; + + case MCI_GPM_COEX_BT_STATUS_UPDATE: + profile_status.is_link = *(rx_payload + + MCI_GPM_COEX_B_STATUS_TYPE); + 
profile_status.conn_handle = *(rx_payload + + MCI_GPM_COEX_B_STATUS_LINKID); + profile_status.is_critical = *(rx_payload + + MCI_GPM_COEX_B_STATUS_STATE); + + seq_num = *((u32 *)(rx_payload + 12)); + ath_dbg(common, MCI, + "MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n", + profile_status.is_link, profile_status.conn_handle, + profile_status.is_critical, seq_num); + + ath_mci_process_status(sc, &profile_status); + break; + + default: + ath_dbg(common, MCI, "MCI Unknown GPM COEX message = 0x%02x\n", + opcode); + break; + } +} + +static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf) +{ + int error = 0; + + buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len, + &buf->bf_paddr, GFP_KERNEL); + + if (buf->bf_addr == NULL) { + error = -ENOMEM; + goto fail; + } + + return 0; + +fail: + memset(buf, 0, sizeof(*buf)); + return error; +} + +static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf) +{ + if (buf->bf_addr) { + dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr, + buf->bf_paddr); + memset(buf, 0, sizeof(*buf)); + } +} + +int ath_mci_setup(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_mci_coex *mci = &sc->mci_coex; + int error = 0; + + if (!ATH9K_HW_CAP_MCI) + return 0; + + mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE; + + if (ath_mci_buf_alloc(sc, &mci->sched_buf)) { + ath_dbg(common, FATAL, "MCI buffer alloc failed\n"); + error = -ENOMEM; + goto fail; + } + + mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE; + + memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN, + mci->sched_buf.bf_len); + + mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE; + mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + + mci->sched_buf.bf_len; + mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len; + + /* initialize the buffer */ + memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len); + + ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr, + mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), + mci->sched_buf.bf_paddr); +fail: + return error; +} + +void ath_mci_cleanup(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_mci_coex *mci = &sc->mci_coex; + + if (!ATH9K_HW_CAP_MCI) + return; + + /* + * both schedule and gpm buffers will be released + */ + ath_mci_buf_free(sc, &mci->sched_buf); + ar9003_mci_cleanup(ah); +} + +void ath_mci_intr(struct ath_softc *sc) +{ + struct ath_mci_coex *mci = &sc->mci_coex; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + u32 mci_int, mci_int_rxmsg; + u32 offset, subtype, opcode; + u32 *pgpm; + u32 more_data = MCI_GPM_MORE; + bool skip_gpm = false; + + if (!ATH9K_HW_CAP_MCI) + return; + + ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); + + if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { + + ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL); + ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n"); + + ath_dbg(common, MCI, + "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n", + mci_int, mci_int_rxmsg); + return; + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) { + u32 payload[4] = { 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffff00}; + + /* + * The following REMOTE_RESET and SYS_WAKING used to sent + * only when BT wake up. Now they are always sent, as a + * recovery method to reset BT MCI's RX alignment. 
+ */ + ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n"); + + ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, + payload, 16, true, false); + ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n"); + ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0, + NULL, 0, true, false); + + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; + ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); + + /* + * always do this for recovery and 2G/5G toggling and LNA_TRANS + */ + ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n"); + ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); + } + + /* Processing SYS_WAKING/SYS_SLEEPING */ + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; + + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { + + if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) + == MCI_BT_SLEEP) + ath_dbg(common, MCI, + "MCI BT stays in sleep mode\n"); + else { + ath_dbg(common, MCI, + "MCI Set BT state to AWAKE\n"); + ar9003_mci_state(ah, + MCI_STATE_SET_BT_AWAKE, NULL); + } + } else + ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n"); + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { + + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; + + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { + + if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) + == MCI_BT_AWAKE) + ath_dbg(common, MCI, + "MCI BT stays in AWAKE mode\n"); + else { + ath_dbg(common, MCI, + "MCI SetBT state to SLEEP\n"); + ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, + NULL); + } + } else + ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n"); + } + + if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || + (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { + + ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n"); + ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); + skip_gpm = true; + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { + + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; + offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, + NULL); + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { + + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM; + + while (more_data == MCI_GPM_MORE) { + + pgpm = mci->gpm_buf.bf_addr; + offset = ar9003_mci_state(ah, + MCI_STATE_NEXT_GPM_OFFSET, &more_data); + + if (offset == MCI_GPM_INVALID) + break; + + pgpm += (offset >> 2); + + /* + * The first dword is timer. + * The real data starts from 2nd dword. 
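+ * (The offset returned via MCI_STATE_NEXT_GPM_OFFSET is a byte offset
+ * into the GPM buffer, hence the ">> 2" before indexing the u32 array.)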
+ */ + + subtype = MCI_GPM_TYPE(pgpm); + opcode = MCI_GPM_OPCODE(pgpm); + + if (!skip_gpm) { + + if (MCI_GPM_IS_CAL_TYPE(subtype)) + ath_mci_cal_msg(sc, subtype, + (u8 *) pgpm); + else { + switch (subtype) { + case MCI_GPM_COEX_AGENT: + ath_mci_msg(sc, opcode, + (u8 *) pgpm); + break; + default: + break; + } + } + } + MCI_GPM_RECYCLE(pgpm); + } + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) { + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL) + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL; + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) { + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; + ath_dbg(common, MCI, "MCI LNA_INFO\n"); + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { + + int value_dbm = ar9003_mci_state(ah, + MCI_STATE_CONT_RSSI_POWER, NULL); + + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; + + if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) + ath_dbg(common, MCI, + "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", + ar9003_mci_state(ah, + MCI_STATE_CONT_PRIORITY, NULL), + value_dbm); + else + ath_dbg(common, MCI, + "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n", + ar9003_mci_state(ah, + MCI_STATE_CONT_PRIORITY, NULL), + value_dbm); + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) { + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK; + ath_dbg(common, MCI, "MCI CONT_NACK\n"); + } + + if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) { + mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST; + ath_dbg(common, MCI, "MCI CONT_RST\n"); + } + } + + if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || + (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) + mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | + AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); + + if (mci_int_rxmsg & 0xfffffffe) + ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n", + mci_int_rxmsg); +} diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h new file mode 100644 index 00000000000..29e3e51d078 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/mci.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef MCI_H +#define MCI_H + +#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dword each */ +#define ATH_MCI_GPM_MAX_ENTRY 16 +#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16) +#define ATH_MCI_DEF_BT_PERIOD 40 +#define ATH_MCI_BDR_DUTY_CYCLE 20 +#define ATH_MCI_MAX_DUTY_CYCLE 90 + +#define ATH_MCI_DEF_AGGR_LIMIT 6 /* in 0.24 ms */ +#define ATH_MCI_MAX_ACL_PROFILE 7 +#define ATH_MCI_MAX_SCO_PROFILE 1 +#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\ + ATH_MCI_MAX_SCO_PROFILE) + +#define INC_PROF(_mci, _info) do { \ + switch (_info->type) { \ + case MCI_GPM_COEX_PROFILE_RFCOMM:\ + _mci->num_other_acl++; \ + break; \ + case MCI_GPM_COEX_PROFILE_A2DP: \ + _mci->num_a2dp++; \ + if (!_info->edr) \ + _mci->num_bdr++; \ + break; \ + case MCI_GPM_COEX_PROFILE_HID: \ + _mci->num_hid++; \ + break; \ + case MCI_GPM_COEX_PROFILE_BNEP: \ + _mci->num_pan++; \ + break; \ + case MCI_GPM_COEX_PROFILE_VOICE: \ + _mci->num_sco++; \ + break; \ + default: \ + break; \ + } \ + } while (0) + +#define DEC_PROF(_mci, _info) do { \ + switch (_info->type) { \ + case MCI_GPM_COEX_PROFILE_RFCOMM:\ + _mci->num_other_acl--; \ + break; \ + case MCI_GPM_COEX_PROFILE_A2DP: \ + _mci->num_a2dp--; \ + if (!_info->edr) \ + _mci->num_bdr--; \ + break; \ + case MCI_GPM_COEX_PROFILE_HID: \ + _mci->num_hid--; \ + break; \ + case MCI_GPM_COEX_PROFILE_BNEP: \ + _mci->num_pan--; \ + break; \ + case MCI_GPM_COEX_PROFILE_VOICE: \ + _mci->num_sco--; \ + break; \ + default: \ + break; \ + } \ + } while (0) + +#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \ + _mci->num_hid + _mci->num_pan + _mci->num_sco) + +struct ath_mci_profile_info { + u8 type; + u8 conn_handle; + bool start; + bool master; + bool edr; + u8 voice_type; + u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */ + u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */ + u8 A; /* HID: Sniff attempt, in slots */ + struct list_head list; +}; + +struct ath_mci_profile_status { + bool is_critical; + bool is_link; + u8 conn_handle; +}; + +struct ath_mci_profile { + struct list_head info; + DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE); + u16 aggr_limit; + u8 num_mgmt; + u8 num_sco; + u8 num_a2dp; + u8 num_hid; + u8 num_pan; + u8 num_other_acl; + u8 num_bdr; +}; + + +struct ath_mci_buf { + void *bf_addr; /* virtual addr of desc */ + dma_addr_t bf_paddr; /* physical addr of buffer */ + u32 bf_len; /* len of data */ +}; + +struct ath_mci_coex { + atomic_t mci_cal_flag; + struct ath_mci_buf sched_buf; + struct ath_mci_buf gpm_buf; + u32 bt_cal_start; +}; + +void ath_mci_flush_profile(struct ath_mci_profile *mci); +int ath_mci_setup(struct ath_softc *sc); +void ath_mci_cleanup(struct ath_softc *sc); +void ath_mci_intr(struct ath_softc *sc); +#endif diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 2dcdf63cb39..77dc327def8 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -121,7 +121,7 @@ static void ath_pci_aspm_init(struct ath_common *common) if (!parent) return; - if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) { + if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { /* Bluetooth coexistance requires disabling ASPM. 
*/ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); @@ -307,12 +307,11 @@ static int ath_pci_suspend(struct device *device) struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_softc *sc = hw->priv; - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); - /* The device has to be moved to FULLSLEEP forcibly. * Otherwise the chip never moved to full sleep, * when no interface is up. */ + ath9k_hw_disable(sc->sc_ah); ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); return 0; @@ -321,8 +320,6 @@ static int ath_pci_suspend(struct device *device) static int ath_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct ieee80211_hw *hw = pci_get_drvdata(pdev); - struct ath_softc *sc = hw->priv; u32 val; /* @@ -334,22 +331,6 @@ static int ath_pci_resume(struct device *device) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - ath9k_ps_wakeup(sc); - /* Enable LED */ - ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); - - /* - * Reset key cache to sane defaults (all entries cleared) instead of - * semi-random values after suspend/resume. - */ - ath9k_cmn_init_crypto(sc->sc_ah); - ath9k_ps_restore(sc); - - sc->ps_idle = true; - ath_radio_disable(sc, hw); - return 0; } diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 528d5f3e868..b3c3798fe51 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1199,7 +1199,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc, return &ar5416_11na_ratetable; return &ar5416_11a_ratetable; default: - ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n"); + ath_dbg(common, CONFIG, "Invalid band\n"); return NULL; } } @@ -1276,8 +1276,7 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->valid_rate_index[k-1]; ath_rc_priv->rate_table = rate_table; - ath_dbg(common, ATH_DBG_CONFIG, - "RC Initialized with capabilities: 0x%x\n", + ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n", ath_rc_priv->ht_cap); } @@ -1474,7 +1473,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, oper_cw40, oper_sgi); ath_rc_init(sc, priv_sta, sband, sta, rate_table); - ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, + ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Operating HT Bandwidth changed to: %d\n", sc->hw->conf.channel_type); } diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 67b862cdae6..0e666fbe084 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -172,7 +172,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc, u32 nbuf = 0; if (list_empty(&sc->rx.rxbuf)) { - ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n"); + ath_dbg(common, QUEUE, "No free rx buf available\n"); return; } @@ -337,7 +337,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { return ath_rx_edma_init(sc, nbufs); } else { - ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", + ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", common->cachelsz, common->rx_bufsize); /* Initialize rx descriptors */ @@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc) return rfilt; -#undef RX_FILTER_PRESERVE } int ath_startrecv(struct ath_softc *sc) @@ -592,7 +591,7 @@ static void 
ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) if (sc->ps_flags & PS_BEACON_SYNC) { sc->ps_flags &= ~PS_BEACON_SYNC; - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "Reconfigure Beacon timers based on timestamp from the AP\n"); ath_set_beacon(sc); } @@ -605,7 +604,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) * a backup trigger for returning into NETWORK SLEEP state, * so we are waiting for it as well. */ - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; return; @@ -618,8 +617,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) * been delivered. */ sc->ps_flags &= ~PS_WAIT_FOR_CAB; - ath_dbg(common, ATH_DBG_PS, - "PS wait for CAB frames timed out\n"); + ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); } } @@ -644,13 +642,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) * point. */ sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "All PS CAB frames received, back to sleep\n"); } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && !is_multicast_ether_addr(hdr->addr1) && !ieee80211_has_morefrags(hdr->frame_control)) { sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "Going back to sleep after having received PS-Poll data (0x%lx)\n", sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | @@ -933,7 +931,7 @@ static int ath9k_process_rate(struct ath_common *common, * No valid hardware bitrate found -- we should not get here * because hardware has already validated this frame as OK. */ - ath_dbg(common, ATH_DBG_ANY, + ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", rx_stats->rs_rate); @@ -1824,6 +1822,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); rxs = IEEE80211_SKB_RXCB(hdr_skb); if (ieee80211_is_beacon(hdr->frame_control) && + !is_zero_ether_addr(common->curbssid) && !compare_ether_addr(hdr->addr3, common->curbssid)) rs.is_mybeacon = true; else @@ -1838,11 +1837,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (sc->sc_flags & SC_OP_RXFLUSH) goto requeue_drop_frag; - retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, - rxs, &decrypt_error); - if (retval) - goto requeue_drop_frag; - rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; if (rs.rs_tstamp > tsf_lower && unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) @@ -1852,6 +1846,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, + rxs, &decrypt_error); + if (retval) + goto requeue_drop_frag; + /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); @@ -1923,15 +1922,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) skb = hdr_skb; } - /* - * change the default rx antenna if rx diversity chooses the - * other antenna 3 times in a row. 
- */ - if (sc->rx.defant != rs.rs_antenna) { - if (++sc->rx.rxotherant >= 3) - ath_setdefantenna(sc, rs.rs_antenna); - } else { - sc->rx.rxotherant = 0; + + if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { + + /* + * change the default rx antenna if rx diversity + * chooses the other antenna 3 times in a row. + */ + if (sc->rx.defant != rs.rs_antenna) { + if (++sc->rx.rxotherant >= 3) + ath_setdefantenna(sc, rs.rs_antenna); + } else { + sc->rx.rxotherant = 0; + } + } if (rxs->flag & RX_FLAG_MMIC_STRIPPED) diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 8fcb7e9e839..6e2f18861f5 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -1006,6 +1006,8 @@ enum { #define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030) #define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 #define AR_INTR_ASYNC_MASK_GPIO_S 18 +#define AR_INTR_ASYNC_MASK_MCI 0x00000080 +#define AR_INTR_ASYNC_MASK_MCI_S 7 #define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034) #define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 @@ -1013,6 +1015,14 @@ enum { #define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038) #define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038) +#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080 +#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \ + AR_INTR_ASYNC_CAUSE_MCI) + +/* Asynchronous Interrupt Enable Register */ +#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080 +#define AR_INTR_ASYNC_ENABLE_MCI_S 7 + #define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c) #define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 @@ -1269,6 +1279,8 @@ enum { #define AR_RTC_INTR_MASK \ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058) +#define AR_RTC_KEEP_AWAKE 0x7034 + /* RTC_DERIVED_* - only for AR9100 */ #define AR_RTC_DERIVED_CLK \ @@ -1555,6 +1567,8 @@ enum { #define AR_DIAG_FRAME_NV0 0x00020000 #define AR_DIAG_OBS_PT_SEL1 0x000C0000 #define AR_DIAG_OBS_PT_SEL1_S 18 +#define AR_DIAG_OBS_PT_SEL2 0x08000000 +#define AR_DIAG_OBS_PT_SEL2_S 27 #define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */ #define AR_DIAG_IGNORE_VIRT_CS 0x00200000 #define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000 @@ -1752,19 +1766,10 @@ enum { #define AR_BT_COEX_WL_WEIGHTS0 0x8174 #define AR_BT_COEX_WL_WEIGHTS1 0x81c4 +#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2)) +#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2)) -#define AR_BT_COEX_BT_WEIGHTS0 0x83ac -#define AR_BT_COEX_BT_WEIGHTS1 0x83b0 -#define AR_BT_COEX_BT_WEIGHTS2 0x83b4 -#define AR_BT_COEX_BT_WEIGHTS3 0x83b8 - -#define AR9300_BT_WGHT 0xcccc4444 -#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0 -#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0 -#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880 -#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880 -#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000 -#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000 +#define AR9300_BT_WGHT 0xcccc4444 #define AR_BT_COEX_MODE2 0x817c #define AR_BT_BCN_MISS_THRESH 0x000000ff @@ -1938,37 +1943,277 @@ enum { #define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6 /* MCI Registers */ -#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c -#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 -#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 -#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 -#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 -#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 -#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 -#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 -#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 -#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 -#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 -#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ + +#define AR_MCI_COMMAND0 0x1800 +#define AR_MCI_COMMAND0_HEADER 0xFF +#define AR_MCI_COMMAND0_HEADER_S 0 +#define AR_MCI_COMMAND0_LEN 0x1f00 +#define AR_MCI_COMMAND0_LEN_S 8 +#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000 +#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13 + +#define AR_MCI_COMMAND1 0x1804 + +#define AR_MCI_COMMAND2 0x1808 +#define AR_MCI_COMMAND2_RESET_TX 0x01 +#define AR_MCI_COMMAND2_RESET_TX_S 0 +#define AR_MCI_COMMAND2_RESET_RX 0x02 +#define AR_MCI_COMMAND2_RESET_RX_S 1 +#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC +#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2 +#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400 +#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10 + +#define AR_MCI_RX_CTRL 0x180c + +#define AR_MCI_TX_CTRL 0x1810 +/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */ +#define AR_MCI_TX_CTRL_CLK_DIV 0x03 +#define AR_MCI_TX_CTRL_CLK_DIV_S 0 +#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04 +#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24 + +#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814 +#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF +#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0 +#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000 +#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16 + +#define AR_MCI_SCHD_TABLE_0 0x1818 +#define AR_MCI_SCHD_TABLE_1 0x181c +#define AR_MCI_GPM_0 0x1820 +#define AR_MCI_GPM_1 0x1824 +#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000 +#define AR_MCI_GPM_WRITE_PTR_S 16 +#define AR_MCI_GPM_BUF_LEN 0x0000FFFF +#define AR_MCI_GPM_BUF_LEN_S 0 + +#define AR_MCI_INTERRUPT_RAW 0x1828 +#define AR_MCI_INTERRUPT_EN 0x182c +#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001 +#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0 +#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002 +#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1 +#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004 +#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2 +#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008 +#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3 +#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010 +#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4 +#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020 +#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5 +#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080 +#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7 +#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100 +#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8 +#define AR_MCI_INTERRUPT_RX_MSG 0x00000200 +#define AR_MCI_INTERRUPT_RX_MSG_S 9 +#define 
AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400 +#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10 +#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800 +#define AR_MCI_INTERRUPT_BT_PRI_S 11 +#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000 +#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27 +#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000 +#define AR_MCI_INTERRUPT_BT_FREQ_S 28 +#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000 +#define AR_MCI_INTERRUPT_BT_STOMP_S 29 +#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000 +#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30 +#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000 +#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31 + +#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \ + AR_MCI_INTERRUPT_RX_INVALID_HDR | \ + AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \ + AR_MCI_INTERRUPT_RX_MSG | \ + AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \ + AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT) + +#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_SW_MSG_FAIL) + +#define AR_MCI_REMOTE_CPU_INT 0x1830 +#define AR_MCI_REMOTE_CPU_INT_EN 0x1834 +#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838 +#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c +#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 +#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 +#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 +#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 +#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 +#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 +#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 +#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 +#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 +#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 +#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ AR_MCI_INTERRUPT_RX_MSG_CONT_RST) +#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \ + AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \ + AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \ + AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \ + AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ + AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \ + AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ + AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ + AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ + AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \ + AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) + +#define AR_MCI_CPU_INT 0x1840 + +#define AR_MCI_RX_STATUS 0x1844 +#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00 +#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8 +#define AR_MCI_RX_REMOTE_SLEEP 0x00001000 +#define 
AR_MCI_RX_REMOTE_SLEEP_S 12 +#define AR_MCI_RX_MCI_CLK_REQ 0x00002000 +#define AR_MCI_RX_MCI_CLK_REQ_S 13 + +#define AR_MCI_CONT_STATUS 0x1848 +#define AR_MCI_CONT_RSSI_POWER 0x000000FF +#define AR_MCI_CONT_RSSI_POWER_S 0 +#define AR_MCI_CONT_RRIORITY 0x0000FF00 +#define AR_MCI_CONT_RRIORITY_S 8 +#define AR_MCI_CONT_TXRX 0x00010000 +#define AR_MCI_CONT_TXRX_S 16 + +#define AR_MCI_BT_PRI0 0x184c +#define AR_MCI_BT_PRI1 0x1850 +#define AR_MCI_BT_PRI2 0x1854 +#define AR_MCI_BT_PRI3 0x1858 +#define AR_MCI_BT_PRI 0x185c +#define AR_MCI_WL_FREQ0 0x1860 +#define AR_MCI_WL_FREQ1 0x1864 +#define AR_MCI_WL_FREQ2 0x1868 +#define AR_MCI_GAIN 0x186c +#define AR_MCI_WBTIMER1 0x1870 +#define AR_MCI_WBTIMER2 0x1874 +#define AR_MCI_WBTIMER3 0x1878 +#define AR_MCI_WBTIMER4 0x187c +#define AR_MCI_MAXGAIN 0x1880 +#define AR_MCI_HW_SCHD_TBL_CTL 0x1884 +#define AR_MCI_HW_SCHD_TBL_D0 0x1888 +#define AR_MCI_HW_SCHD_TBL_D1 0x188c +#define AR_MCI_HW_SCHD_TBL_D2 0x1890 +#define AR_MCI_HW_SCHD_TBL_D3 0x1894 +#define AR_MCI_TX_PAYLOAD0 0x1898 +#define AR_MCI_TX_PAYLOAD1 0x189c +#define AR_MCI_TX_PAYLOAD2 0x18a0 +#define AR_MCI_TX_PAYLOAD3 0x18a4 +#define AR_BTCOEX_WBTIMER 0x18a8 + +#define AR_BTCOEX_CTRL 0x18ac +#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001 +#define AR_BTCOEX_CTRL_AR9462_MODE_S 0 +#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002 +#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1 +#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004 +#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2 +#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008 +#define AR_BTCOEX_CTRL_LNA_SHARED_S 3 +#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010 +#define AR_BTCOEX_CTRL_PA_SHARED_S 4 +#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020 +#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5 +#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040 +#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6 +#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180 +#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7 +#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00 +#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9 +#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000 +#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12 +#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000 +#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19 +#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000 +#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20 +#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000 +#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28 +#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000 +#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29 +#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000 +#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30 +#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000 +#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31 + +#define AR_BTCOEX_WL_WEIGHTS0 0x18b0 +#define AR_BTCOEX_WL_WEIGHTS1 0x18b4 +#define AR_BTCOEX_WL_WEIGHTS2 0x18b8 +#define AR_BTCOEX_WL_WEIGHTS3 0x18bc +#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) +#define AR_BTCOEX_WL_LNA 0x1940 +#define AR_BTCOEX_RFGAIN_CTRL 0x1944 + +#define AR_BTCOEX_CTRL2 0x1948 +#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800 +#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11 +#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000 +#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19 +#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000 +#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22 +#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000 +#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23 +#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000 +#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24 +#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000 +#define 
AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25 + +#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001 +#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0 +#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002 +#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1 +#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004 +#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2 +#define AR_GLB_WLAN_UART_INTF_EN 0x00020000 +#define AR_GLB_WLAN_UART_INTF_EN_S 17 +#define AR_GLB_DS_JTAG_DISABLE 0x00040000 +#define AR_GLB_DS_JTAG_DISABLE_S 18 + +#define AR_BTCOEX_RC 0x194c +#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2)) +#define AR_BTCOEX_DBG 0x1a50 +#define AR_MCI_LAST_HW_MSG_HDR 0x1a54 +#define AR_MCI_LAST_HW_MSG_BDY 0x1a58 + +#define AR_MCI_SCHD_TABLE_2 0x1a5c +#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001 +#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0 +#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002 +#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1 + +#define AR_BTCOEX_CTRL3 0x1a60 +#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff +#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0 + #endif diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index 35422fc1f2c..65c8894c5f8 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -187,7 +187,7 @@ void ath9k_fatal_work(struct work_struct *work) fatal_work); struct ath_common *common = ath9k_hw_common(priv->ah); - ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n"); + ath_dbg(common, FATAL, "FATAL Event received, resetting device\n"); ath9k_htc_reset(priv); } @@ -330,8 +330,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout); if (!time_left) { - ath_dbg(common, ATH_DBG_WMI, - "Timeout waiting for WMI command: %s\n", + ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; @@ -342,8 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, return 0; out: - ath_dbg(common, ATH_DBG_WMI, - "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); + ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); kfree_skb(skb); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 03b0a651a59..3182408ffe3 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, int tx_flags, struct ath_txq *txq); static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, - struct ath_tx_status *ts, int txok, int sendbar); + struct ath_tx_status *ts, int txok); static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, struct list_head *head, bool internal); static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, @@ -104,6 +104,32 @@ static int ath_max_4ms_framelen[4][32] = { /* Aggregation logic */ /*********************/ +static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) + __acquires(&txq->axq_lock) +{ + spin_lock_bh(&txq->axq_lock); +} + +static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) + __releases(&txq->axq_lock) +{ + spin_unlock_bh(&txq->axq_lock); +} + +static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) + __releases(&txq->axq_lock) +{ + struct sk_buff_head q; + struct sk_buff *skb; + + 
__skb_queue_head_init(&q); + skb_queue_splice_init(&txq->complete_q, &q); + spin_unlock_bh(&txq->axq_lock); + + while ((skb = __skb_dequeue(&q))) + ieee80211_tx_status(sc->hw, skb); +} + static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) { struct ath_atx_ac *ac = tid->ac; @@ -130,7 +156,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) WARN_ON(!tid->paused); - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); tid->paused = false; if (skb_queue_empty(&tid->buf_q)) @@ -139,7 +165,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) ath_tx_queue_tid(txq, tid); ath_txq_schedule(sc, txq); unlock: - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock_complete(sc, txq); } static struct ath_frame_info *get_frame_info(struct sk_buff *skb) @@ -150,6 +176,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; } +static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) +{ + ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, + seqno << IEEE80211_SEQ_SEQ_SHIFT); +} + static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = tid->ac->txq; @@ -158,28 +190,36 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) struct list_head bf_head; struct ath_tx_status ts; struct ath_frame_info *fi; + bool sendbar = false; INIT_LIST_HEAD(&bf_head); memset(&ts, 0, sizeof(ts)); - spin_lock_bh(&txq->axq_lock); while ((skb = __skb_dequeue(&tid->buf_q))) { fi = get_frame_info(skb); bf = fi->bf; - spin_unlock_bh(&txq->axq_lock); if (bf && fi->retries) { list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + sendbar = true; } else { ath_tx_send_normal(sc, txq, NULL, skb); } - spin_lock_bh(&txq->axq_lock); } - spin_unlock_bh(&txq->axq_lock); + if (tid->baw_head == tid->baw_tail) { + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + } + + if (sendbar) { + ath_txq_unlock(sc, txq); + ath_send_bar(tid, tid->seq_start); + ath_txq_lock(sc, txq); + } } static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, @@ -195,6 +235,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { INCR(tid->seq_start, IEEE80211_SEQ_MAX); INCR(tid->baw_head, ATH_TID_MAX_BUFS); + if (tid->bar_index >= 0) + tid->bar_index--; } } @@ -238,9 +280,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, bf = fi->bf; if (!bf) { - spin_unlock(&txq->axq_lock); ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); - spin_lock(&txq->axq_lock); continue; } @@ -249,24 +289,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, if (fi->retries) ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - spin_unlock(&txq->axq_lock); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); - spin_lock(&txq->axq_lock); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } tid->seq_next = tid->seq_start; tid->baw_tail = tid->baw_head; + tid->bar_index = -1; } static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, - struct sk_buff *skb) + struct sk_buff *skb, int count) { struct ath_frame_info *fi = get_frame_info(skb); struct ath_buf *bf = fi->bf; struct ieee80211_hdr *hdr; + int prev = fi->retries; 
TX_STAT_INC(txq->axq_qnum, a_retries); - if (fi->retries++ > 0) + fi->retries += count; + + if (prev > 0) return; hdr = (struct ieee80211_hdr *)skb->data; @@ -365,7 +407,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; struct list_head bf_head; struct sk_buff_head bf_pending; - u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; + u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; u32 ba[WME_BA_BMP_SIZE >> 5]; int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; bool rc_update = true; @@ -374,6 +416,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, int nframes; u8 tidno; bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); + int i, retries; + int bar_index = -1; skb = bf->bf_mpdu; hdr = (struct ieee80211_hdr *)skb->data; @@ -382,6 +426,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, memcpy(rates, tx_info->control.rates, sizeof(rates)); + retries = ts->ts_longretry + 1; + for (i = 0; i < ts->ts_rateindex; i++) + retries += rates[i].count; + rcu_read_lock(); sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); @@ -395,8 +443,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (!bf->bf_stale || bf_next != NULL) list_move_tail(&bf->list, &bf_head); - ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, - 0, 0); + ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0); bf = bf_next; } @@ -406,6 +453,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, an = (struct ath_node *)sta->drv_priv; tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; tid = ATH_AN_2_TID(an, tidno); + seq_first = tid->seq_start; /* * The hardware occasionally sends a tx status for the wrong TID. 
@@ -455,25 +503,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } else if (!isaggr && txok) { /* transmit completion */ acked_cnt++; + } else if ((tid->state & AGGR_CLEANUP) || !retry) { + /* + * cleanup in progress, just fail + * the un-acked sub-frames + */ + txfail = 1; + } else if (flush) { + txpending = 1; + } else if (fi->retries < ATH_MAX_SW_RETRIES) { + if (txok || !an->sleeping) + ath_tx_set_retry(sc, txq, bf->bf_mpdu, + retries); + + txpending = 1; } else { - if ((tid->state & AGGR_CLEANUP) || !retry) { - /* - * cleanup in progress, just fail - * the un-acked sub-frames - */ - txfail = 1; - } else if (flush) { - txpending = 1; - } else if (fi->retries < ATH_MAX_SW_RETRIES) { - if (txok || !an->sleeping) - ath_tx_set_retry(sc, txq, bf->bf_mpdu); - - txpending = 1; - } else { - txfail = 1; - sendbar = 1; - txfail_cnt++; - } + txfail = 1; + txfail_cnt++; + bar_index = max_t(int, bar_index, + ATH_BA_INDEX(seq_first, seqno)); } /* @@ -490,9 +538,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * complete the acked-ones/xretried ones; update * block-ack window */ - spin_lock_bh(&txq->axq_lock); ath_tx_update_baw(sc, tid, seqno); - spin_unlock_bh(&txq->axq_lock); if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { memcpy(tx_info->control.rates, rates, sizeof(rates)); @@ -501,33 +547,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, - !txfail, sendbar); + !txfail); } else { /* retry the un-acked ones */ - if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { - if (bf->bf_next == NULL && bf_last->bf_stale) { - struct ath_buf *tbf; - - tbf = ath_clone_txbuf(sc, bf_last); - /* - * Update tx baw and complete the - * frame with failed status if we - * run out of tx buf. - */ - if (!tbf) { - spin_lock_bh(&txq->axq_lock); - ath_tx_update_baw(sc, tid, seqno); - spin_unlock_bh(&txq->axq_lock); - - ath_tx_complete_buf(sc, bf, txq, - &bf_head, - ts, 0, - !flush); - break; - } - - fi->bf = tbf; + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && + bf->bf_next == NULL && bf_last->bf_stale) { + struct ath_buf *tbf; + + tbf = ath_clone_txbuf(sc, bf_last); + /* + * Update tx baw and complete the + * frame with failed status if we + * run out of tx buf. 
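+ * In that case bar_index is also updated so that a BAR is
+ * sent later, letting the receiver move its block-ack
+ * window past the dropped sub-frame.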
+ */ + if (!tbf) { + ath_tx_update_baw(sc, tid, seqno); + + ath_tx_complete_buf(sc, bf, txq, + &bf_head, ts, 0); + bar_index = max_t(int, bar_index, + ATH_BA_INDEX(seq_first, seqno)); + break; } + + fi->bf = tbf; } /* @@ -545,7 +588,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (an->sleeping) ieee80211_sta_set_buffered(sta, tid->tidno, true); - spin_lock_bh(&txq->axq_lock); skb_queue_splice(&bf_pending, &tid->buf_q); if (!an->sleeping) { ath_tx_queue_tid(txq, tid); @@ -553,18 +595,22 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (ts->ts_status & ATH9K_TXERR_FILT) tid->ac->clear_ps_filter = true; } - spin_unlock_bh(&txq->axq_lock); } - if (tid->state & AGGR_CLEANUP) { - ath_tx_flush_tid(sc, tid); + if (bar_index >= 0) { + u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index); - if (tid->baw_head == tid->baw_tail) { - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; - } + if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq)) + tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq); + + ath_txq_unlock(sc, txq); + ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1)); + ath_txq_lock(sc, txq); } + if (tid->state & AGGR_CLEANUP) + ath_tx_flush_tid(sc, tid); + rcu_read_unlock(); if (needreset) { @@ -601,6 +647,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, struct sk_buff *skb; struct ieee80211_tx_info *tx_info; struct ieee80211_tx_rate *rates; + struct ath_mci_profile *mci = &sc->btcoex.mci; u32 max_4ms_framelen, frmlen; u16 aggr_limit, legacy = 0; int i; @@ -617,24 +664,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; for (i = 0; i < 4; i++) { - if (rates[i].count) { - int modeidx; - if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { - legacy = 1; - break; - } - - if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - modeidx = MCS_HT40; - else - modeidx = MCS_HT20; + int modeidx; - if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) - modeidx++; + if (!rates[i].count) + continue; - frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; - max_4ms_framelen = min(max_4ms_framelen, frmlen); + if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { + legacy = 1; + break; } + + if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + modeidx = MCS_HT40; + else + modeidx = MCS_HT20; + + if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) + modeidx++; + + frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; + max_4ms_framelen = min(max_4ms_framelen, frmlen); } /* @@ -645,7 +694,9 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) return 0; - if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit) + aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4; + else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) aggr_limit = min((max_4ms_framelen * 3) / 8, (u32)ATH_AMPDU_LIMIT_MAX); else @@ -768,8 +819,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; seqno = bf->bf_state.seqno; - if (!bf_first) - bf_first = bf; /* do not step over block-ack window */ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { @@ -777,6 +826,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, break; } + if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { + struct ath_tx_status ts = {}; + struct list_head bf_head; + + 
INIT_LIST_HEAD(&bf_head); + list_add(&bf->list, &bf_head); + __skb_unlink(skb, &tid->buf_q); + ath_tx_update_baw(sc, tid, seqno); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + continue; + } + + if (!bf_first) + bf_first = bf; + if (!rl) { aggr_limit = ath_lookup_rate(sc, bf, tid); rl = 1; @@ -1119,6 +1183,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, txtid->state |= AGGR_ADDBA_PROGRESS; txtid->paused = true; *ssn = txtid->seq_start = txtid->seq_next; + txtid->bar_index = -1; memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); txtid->baw_head = txtid->baw_tail = 0; @@ -1140,7 +1205,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) return; } - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); txtid->paused = true; /* @@ -1153,9 +1218,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) txtid->state |= AGGR_CLEANUP; else txtid->state &= ~AGGR_ADDBA_COMPLETE; - spin_unlock_bh(&txq->axq_lock); ath_tx_flush_tid(sc, txtid); + ath_txq_unlock_complete(sc, txq); } void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, @@ -1176,7 +1241,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, ac = tid->ac; txq = ac->txq; - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); buffered = !skb_queue_empty(&tid->buf_q); @@ -1188,7 +1253,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, list_del(&ac->list); } - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock(sc, txq); ieee80211_sta_set_buffered(sta, tidno, buffered); } @@ -1207,7 +1272,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ac = tid->ac; txq = ac->txq; - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); ac->clear_ps_filter = true; if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { @@ -1215,7 +1280,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_schedule(sc, txq); } - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock_complete(sc, txq); } } @@ -1315,6 +1380,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) txq->axq_qnum = axq_qnum; txq->mac80211_qnum = -1; txq->axq_link = NULL; + __skb_queue_head_init(&txq->complete_q); INIT_LIST_HEAD(&txq->axq_q); INIT_LIST_HEAD(&txq->axq_acq); spin_lock_init(&txq->axq_lock); @@ -1397,8 +1463,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, struct list_head *list, bool retry_tx) - __releases(txq->axq_lock) - __acquires(txq->axq_lock) { struct ath_buf *bf, *lastbf; struct list_head bf_head; @@ -1425,13 +1489,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, if (bf_is_ampdu_not_probing(bf)) txq->axq_ampdu_depth--; - spin_unlock_bh(&txq->axq_lock); if (bf_isampdu(bf)) ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, retry_tx); else - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); - spin_lock_bh(&txq->axq_lock); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } } @@ -1443,7 +1505,8 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, */ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) { - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { int idx = txq->txq_tailidx; @@ -1464,7 +1527,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx) 
ath_txq_drain_pending_buffers(sc, txq); - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock_complete(sc, txq); } bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) @@ -1558,11 +1621,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) break; } - if (!list_empty(&ac->tid_q)) { - if (!ac->sched) { - ac->sched = true; - list_add_tail(&ac->list, &txq->axq_acq); - } + if (!list_empty(&ac->tid_q) && !ac->sched) { + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); } if (ac == last_ac || @@ -1600,8 +1661,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, bf = list_first_entry(head, struct ath_buf, list); bf_last = list_entry(head->prev, struct ath_buf, list); - ath_dbg(common, ATH_DBG_QUEUE, - "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); + ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n", + txq->axq_qnum, txq->axq_depth); if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) { list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); @@ -1612,8 +1673,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, if (txq->axq_link) { ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr); - ath_dbg(common, ATH_DBG_XMIT, - "link[%u] (%p)=%llx (%p)\n", + ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n", txq->axq_qnum, txq->axq_link, ito64(bf->bf_daddr), bf->bf_desc); } else if (!edma) @@ -1625,7 +1685,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, if (puttxbuf) { TX_STAT_INC(txq->axq_qnum, puttxbuf); ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); - ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", + ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); } @@ -1705,10 +1765,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, list_add_tail(&bf->list, &bf_head); bf->bf_state.bf_type = 0; - /* update starting sequence number for subsequent ADDBA request */ - if (tid) - INCR(tid->seq_start, IEEE80211_SEQ_MAX); - bf->bf_lastbf = bf; ath_tx_fill_desc(sc, bf, txq, fi->framelen); ath_tx_txqaddbuf(sc, txq, &bf_head, false); @@ -1771,7 +1827,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, bf = ath_tx_get_buffer(sc); if (!bf) { - ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n"); + ath_dbg(common, XMIT, "TX buffers are full\n"); goto error; } @@ -1816,7 +1872,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, struct ath_buf *bf; u8 tidno; - spin_lock_bh(&txctl->txq->axq_lock); if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { tidno = ieee80211_get_qos_ctl(hdr)[0] & @@ -1835,7 +1890,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, } else { bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); if (!bf) - goto out; + return; bf->bf_state.bfs_paprd = txctl->paprd; @@ -1844,9 +1899,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, ath_tx_send_normal(sc, txctl->txq, tid, skb); } - -out: - spin_unlock_bh(&txctl->txq->axq_lock); } /* Upon failure caller should free skb */ @@ -1907,15 +1959,18 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, */ q = skb_get_queue_mapping(skb); - spin_lock_bh(&txq->axq_lock); + + ath_txq_lock(sc, txq); if (txq == sc->tx.txq_map[q] && ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) { ieee80211_stop_queue(sc->hw, q); - txq->stopped = 1; + txq->stopped = true; } - spin_unlock_bh(&txq->axq_lock); 
ath_tx_start_dma(sc, skb, txctl); + + ath_txq_unlock(sc, txq); + return 0; } @@ -1926,16 +1981,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, int tx_flags, struct ath_txq *txq) { - struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; int q, padpos, padsize; - ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); - - if (tx_flags & ATH_TX_BAR) - tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); if (!(tx_flags & ATH_TX_ERROR)) /* Frame was ACKed */ @@ -1952,9 +2003,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, skb_pull(skb, padsize); } - if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { + if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; - ath_dbg(common, ATH_DBG_PS, + ath_dbg(common, PS, "Going back to sleep after having received TX status (0x%lx)\n", sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | @@ -1964,32 +2015,27 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, q = skb_get_queue_mapping(skb); if (txq == sc->tx.txq_map[q]) { - spin_lock_bh(&txq->axq_lock); if (WARN_ON(--txq->pending_frames < 0)) txq->pending_frames = 0; if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) { ieee80211_wake_queue(sc->hw, q); - txq->stopped = 0; + txq->stopped = false; } - spin_unlock_bh(&txq->axq_lock); } - ieee80211_tx_status(hw, skb); + __skb_queue_tail(&txq->complete_q, skb); } static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, - struct ath_tx_status *ts, int txok, int sendbar) + struct ath_tx_status *ts, int txok) { struct sk_buff *skb = bf->bf_mpdu; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); unsigned long flags; int tx_flags = 0; - if (sendbar) - tx_flags = ATH_TX_BAR; - if (!txok) tx_flags |= ATH_TX_ERROR; @@ -2081,8 +2127,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) - __releases(txq->axq_lock) - __acquires(txq->axq_lock) { int txok; @@ -2092,16 +2136,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, if (bf_is_ampdu_not_probing(bf)) txq->axq_ampdu_depth--; - spin_unlock_bh(&txq->axq_lock); - if (!bf_isampdu(bf)) { ath_tx_rc_status(sc, bf, ts, 1, txok ? 
0 : 1, txok); - ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0); + ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); - spin_lock_bh(&txq->axq_lock); - if (sc->sc_flags & SC_OP_TXAGGR) ath_txq_schedule(sc, txq); } @@ -2116,11 +2156,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) struct ath_tx_status ts; int status; - ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", + ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n", txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link); - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); for (;;) { if (work_pending(&sc->hw_reset_work)) break; @@ -2179,7 +2219,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); } - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock_complete(sc, txq); } static void ath_tx_complete_poll_work(struct work_struct *work) @@ -2196,21 +2236,21 @@ static void ath_tx_complete_poll_work(struct work_struct *work) for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) { txq = &sc->tx.txq[i]; - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); if (txq->axq_depth) { if (txq->axq_tx_inprogress) { needreset = true; - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock(sc, txq); break; } else { txq->axq_tx_inprogress = true; } } - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock_complete(sc, txq); } if (needreset) { - ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, + ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, "tx hung, resetting the chip\n"); RESET_STAT_INC(sc, RESET_TYPE_TX_HANG); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); @@ -2253,8 +2293,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) if (status == -EINPROGRESS) break; if (status == -EIO) { - ath_dbg(common, ATH_DBG_XMIT, - "Error processing tx status\n"); + ath_dbg(common, XMIT, "Error processing tx status\n"); break; } @@ -2264,10 +2303,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) txq = &sc->tx.txq[ts.qid]; - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock(sc, txq); return; } @@ -2293,7 +2332,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) } ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock_complete(sc, txq); } } @@ -2431,7 +2470,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) ac = tid->ac; txq = ac->txq; - spin_lock_bh(&txq->axq_lock); + ath_txq_lock(sc, txq); if (tid->sched) { list_del(&tid->list); @@ -2447,6 +2486,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) tid->state &= ~AGGR_ADDBA_COMPLETE; tid->state &= ~AGGR_CLEANUP; - spin_unlock_bh(&txq->axq_lock); + ath_txq_unlock(sc, txq); } } diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index cba9d0435dc..3de61adacd3 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -146,13 +146,15 @@ static bool valid_cpu_addr(const u32 address) return false; } -static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) +static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data, + size_t len) { const struct carl9170fw_otus_desc *otus_desc; - const struct carl9170fw_chk_desc *chk_desc; const struct carl9170fw_last_desc *last_desc; - const struct carl9170fw_txsq_desc 
*txsq_desc; - u16 if_comb_types; + const struct carl9170fw_chk_desc *chk_desc; + unsigned long fin, diff; + unsigned int dsc_len; + u32 crc32; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); @@ -170,36 +172,68 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC, sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER); - if (chk_desc) { - unsigned long fin, diff; - unsigned int dsc_len; - u32 crc32; + if (!chk_desc) { + dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); + return 0; + } - dsc_len = min_t(unsigned int, len, + dsc_len = min_t(unsigned int, len, (unsigned long)chk_desc - (unsigned long)otus_desc); - fin = (unsigned long) last_desc + sizeof(*last_desc); - diff = fin - (unsigned long) otus_desc; + fin = (unsigned long) last_desc + sizeof(*last_desc); + diff = fin - (unsigned long) otus_desc; - if (diff < len) - len -= diff; + if (diff < len) + len -= diff; - if (len < 256) - return -EIO; + if (len < 256) + return -EIO; - crc32 = crc32_le(~0, data, len); - if (cpu_to_le32(crc32) != chk_desc->fw_crc32) { - dev_err(&ar->udev->dev, "fw checksum test failed.\n"); - return -ENOEXEC; - } + crc32 = crc32_le(~0, data, len); + if (cpu_to_le32(crc32) != chk_desc->fw_crc32) { + dev_err(&ar->udev->dev, "fw checksum test failed.\n"); + return -ENOEXEC; + } + + crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len); + if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) { + dev_err(&ar->udev->dev, "descriptor check failed.\n"); + return -EINVAL; + } + return 0; +} - crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len); - if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) { - dev_err(&ar->udev->dev, "descriptor check failed.\n"); +static int carl9170_fw_tx_sequence(struct ar9170 *ar) +{ + const struct carl9170fw_txsq_desc *txsq_desc; + + txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc), + CARL9170FW_TXSQ_DESC_CUR_VER); + if (txsq_desc) { + ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); + if (!valid_cpu_addr(ar->fw.tx_seq_table)) return -EINVAL; - } } else { - dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); + ar->fw.tx_seq_table = 0; + } + + return 0; +} + +static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) +{ + const struct carl9170fw_otus_desc *otus_desc; + int err; + u16 if_comb_types; + + err = carl9170_fw_checksum(ar, data, len); + if (err) + return err; + + otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, + sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER); + if (!otus_desc) { + return -ENODATA; } #define SUPP(feat) \ @@ -321,19 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= if_comb_types; - txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, - sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); - - if (txsq_desc) { - ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); - if (!valid_cpu_addr(ar->fw.tx_seq_table)) - return -EINVAL; - } else { - ar->fw.tx_seq_table = 0; - } - #undef SUPPORTED - return 0; + return carl9170_fw_tx_sequence(ar); } static struct carl9170fw_desc_head * diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index f06e0695d41..db774212161 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -48,7 +48,7 @@ #include "carl9170.h" #include "cmd.h" -static int modparam_nohwcrypt; +static bool modparam_nohwcrypt; 
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload."); @@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw) mutex_lock(&ar->mutex); if (IS_ACCEPTING_CMD(ar)) { - rcu_assign_pointer(ar->beacon_iter, NULL); + RCU_INIT_POINTER(ar->beacon_iter, NULL); carl9170_led_set_state(ar, 0); @@ -678,7 +678,7 @@ unlock: vif_priv->active = false; bitmap_release_region(&ar->vif_bitmap, vif_id, 0); ar->vifs--; - rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL); + RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL); list_del_rcu(&vif_priv->list); mutex_unlock(&ar->mutex); synchronize_rcu(); @@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw, WARN_ON(vif_priv->enable_beacon); vif_priv->enable_beacon = false; list_del_rcu(&vif_priv->list); - rcu_assign_pointer(ar->vif_priv[id].vif, NULL); + RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL); if (vif == main_vif) { rcu_read_unlock(); @@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw, } for (i = 0; i < CARL9170_NUM_TID; i++) - rcu_assign_pointer(sta_info->agg[i], NULL); + RCU_INIT_POINTER(sta_info->agg[i], NULL); sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor); sta_info->ht_sta = true; @@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw, struct carl9170_sta_tid *tid_info; tid_info = rcu_dereference(sta_info->agg[i]); - rcu_assign_pointer(sta_info->agg[i], NULL); + RCU_INIT_POINTER(sta_info->agg[i], NULL); if (!tid_info) continue; @@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, spin_unlock_bh(&ar->tx_ampdu_list_lock); } - rcu_assign_pointer(sta_info->agg[tid], NULL); + RCU_INIT_POINTER(sta_info->agg[tid], NULL); rcu_read_unlock(); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 59472e1605c..d19a9ee9d05 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref) * feedback either [CTL_REQ_TX_STATUS not set] */ - dev_kfree_skb_any(skb); + ieee80211_free_txskb(ar->hw, skb); return; } else { /* @@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) err_free: ar->tx_dropped++; - dev_kfree_skb_any(skb); + ieee80211_free_txskb(ar->hw, skb); } void carl9170_tx_scheduler(struct ar9170 *ar) diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index 4cf7c5eb481..0e81904956c 100644 --- a/drivers/net/wireless/ath/key.c +++ b/drivers/net/wireless/ath/key.c @@ -143,7 +143,7 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, break; case ATH_CIPHER_AES_CCM: if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) { - ath_dbg(common, ATH_DBG_ANY, + ath_dbg(common, ANY, "AES-CCM not supported by this mac rev\n"); return false; } @@ -152,15 +152,15 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, case ATH_CIPHER_TKIP: keyType = AR_KEYTABLE_TYPE_TKIP; if (entry + 64 >= common->keymax) { - ath_dbg(common, ATH_DBG_ANY, + ath_dbg(common, ANY, "entry %u inappropriate for TKIP\n", entry); return false; } break; case ATH_CIPHER_WEP: if (k->kv_len < WLAN_KEY_LEN_WEP40) { - ath_dbg(common, ATH_DBG_ANY, - "WEP key length %u too small\n", k->kv_len); + ath_dbg(common, ANY, "WEP key length %u too small\n", + k->kv_len); return false; } if (k->kv_len 
<= WLAN_KEY_LEN_WEP40) diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 65ecb5bab25..10dea37431b 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -21,6 +21,8 @@ #include "regd.h" #include "regd_common.h" +static int __ath_regd_init(struct ath_regulatory *reg); + /* * This is a set of common rules used by our world regulatory domains. * We have 12 world regulatory domains. To save space we consolidate @@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy, } } +static u16 ath_regd_find_country_by_name(char *alpha2) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(allCountries); i++) { + if (!memcmp(allCountries[i].isoName, alpha2, 2)) + return allCountries[i].countryCode; + } + + return -1; +} + int ath_reg_notifier_apply(struct wiphy *wiphy, struct regulatory_request *request, struct ath_regulatory *reg) { + struct ath_common *common = container_of(reg, struct ath_common, + regulatory); + u16 country_code; + /* We always apply this */ ath_reg_apply_radar_flags(wiphy); @@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy, return 0; switch (request->initiator) { - case NL80211_REGDOM_SET_BY_DRIVER: case NL80211_REGDOM_SET_BY_CORE: + /* + * If common->reg_world_copy is world roaming it means we *were* + * world roaming... so we now have to restore that data. + */ + if (!ath_is_world_regd(&common->reg_world_copy)) + break; + + memcpy(reg, &common->reg_world_copy, + sizeof(struct ath_regulatory)); + break; + case NL80211_REGDOM_SET_BY_DRIVER: case NL80211_REGDOM_SET_BY_USER: break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: - if (ath_is_world_regd(reg)) - ath_reg_apply_world_flags(wiphy, request->initiator, - reg); + if (!ath_is_world_regd(reg)) + break; + + country_code = ath_regd_find_country_by_name(request->alpha2); + if (country_code == (u16) -1) + break; + + reg->current_rd = COUNTRY_ERD_FLAG; + reg->current_rd |= country_code; + + printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n", + reg->current_rd); + __ath_regd_init(reg); + + ath_reg_apply_world_flags(wiphy, request->initiator, reg); + break; } @@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg) reg->current_rd = 0x64; } -int -ath_regd_init(struct ath_regulatory *reg, - struct wiphy *wiphy, - int (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) +static int __ath_regd_init(struct ath_regulatory *reg) { struct country_code_to_enum_rd *country = NULL; u16 regdmn; @@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg, printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n", reg->regpair->regDmnEnum); + return 0; +} + +int +ath_regd_init(struct ath_regulatory *reg, + struct wiphy *wiphy, + int (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) +{ + struct ath_common *common = container_of(reg, struct ath_common, + regulatory); + int r; + + r = __ath_regd_init(reg); + if (r) + return r; + + if (ath_is_world_regd(reg)) + memcpy(&common->reg_world_copy, reg, + sizeof(struct ath_regulatory)); + ath_regd_init_wiphy(reg, wiphy, reg_notifier); + return 0; } EXPORT_SYMBOL(ath_regd_init); diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 37110dfd2c9..16e8f805815 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -191,6 +191,9 @@ #define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */ #define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna * with bluetooth */ +#define 
B43_BFH_NOCBUCK 0x0080 +#define B43_BFH_PALDO 0x0200 +#define B43_BFH_EXTLNA_5GHZ 0x1000 /* has an external LNA (5GHz mode) */ /* SPROM boardflags2_lo values */ #define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */ @@ -204,6 +207,14 @@ #define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */ #define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */ #define B43_BFL2_GPLL_WAR 0x0400 /* altenative G-band PLL settings implemented */ +#define B43_BFL2_SINGLEANT_CCK 0x1000 +#define B43_BFL2_2G_SPUR_WAR 0x2000 + +/* SPROM boardflags2_hi values */ +#define B43_BFH2_GPLL_WAR2 0x0001 +#define B43_BFH2_IPALVLSHIFT_3P3 0x0002 +#define B43_BFH2_INTERNDET_TXIQCAL 0x0004 +#define B43_BFH2_XTALBUFOUTEN 0x0008 /* GPIO register offset, in both ChipCommon and PCI core. */ #define B43_GPIO_CONTROL 0x6c @@ -667,6 +678,7 @@ struct b43_key { }; /* SHM offsets to the QOS data structures for the 4 different queues. */ +#define B43_QOS_QUEUE_NUM 4 #define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \ (B43_NR_QOSPARAMS * sizeof(u16) * (queue))) #define B43_QOS_BACKGROUND B43_QOS_PARAMS(0) @@ -904,7 +916,7 @@ struct b43_wl { struct work_struct beacon_update_trigger; /* The current QOS parameters for the 4 queues. */ - struct b43_qos_params qos_params[4]; + struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM]; /* Work for adjustment of the transmission power. * This is scheduled when we determine that the actual TX output @@ -913,8 +925,12 @@ struct b43_wl { /* Packet transmit work */ struct work_struct tx_work; + /* Queue of packets to be transmitted. */ - struct sk_buff_head tx_queue; + struct sk_buff_head tx_queue[B43_QOS_QUEUE_NUM]; + + /* Flag that implement the queues stopping. */ + bool tx_queue_stopped[B43_QOS_QUEUE_NUM]; /* The device LEDs. */ struct b43_leds leds; diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 5e45604f0f5..b5f1b91002b 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, else ring->ops = &dma32_ops; if (for_tx) { - ring->tx = 1; + ring->tx = true; ring->current_slot = -1; } else { if (ring->index == 0) { @@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev) static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) { u64 orig_mask = mask; - bool fallback = 0; + bool fallback = false; int err; /* Try to set the DMA mask. If it fails, try falling back to a @@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); - fallback = 1; + fallback = true; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); - fallback = 1; + fallback = true; continue; } b43err(dev->wl, "The machine/kernel does not support " @@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring, memset(meta, 0, sizeof(*meta)); meta->skb = skb; - meta->is_last_fragment = 1; + meta->is_last_fragment = true; priv_info->bouncebuffer = NULL; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); @@ -1465,8 +1465,10 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || should_inject_overflow(ring)) { /* This TX ring is full. 
*/ - ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); - ring->stopped = 1; + unsigned int skb_mapping = skb_get_queue_mapping(skb); + ieee80211_stop_queue(dev->wl->hw, skb_mapping); + dev->wl->tx_queue_stopped[skb_mapping] = 1; + ring->stopped = true; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); } @@ -1584,12 +1586,21 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); + ring->stopped = false; + } + + if (dev->wl->tx_queue_stopped[ring->queue_prio]) { + dev->wl->tx_queue_stopped[ring->queue_prio] = 0; + } else { + /* If the driver queue is running wake the corresponding + * mac80211 queue. */ ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); - ring->stopped = 0; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } } + /* Add work to the queue. */ + ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work); } static void dma_rx(struct b43_dmaring *ring, int *slot) diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index a38c1c6446a..d79ab2a227e 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c @@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev, if (radio_enabled) turn_on = atomic_read(&led->state) != LED_OFF; else - turn_on = 0; + turn_on = false; if (turn_on == led->hw_state) return; led->hw_state = turn_on; @@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, if (sprom[led_index] == 0xFF) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. */ - *activelow = 0; + *activelow = false; switch (led_index) { case 0: *behaviour = B43_LED_ACTIVITY; - *activelow = 1; + *activelow = true; if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ) *behaviour = B43_LED_RADIO_ALL; break; @@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev) if (led->wl) { if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) { b43_led_turn_on(dev, led->index, led->activelow); - led->hw_state = 1; + led->hw_state = true; atomic_set(&led->state, 1); } else { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = 0; + led->hw_state = false; atomic_set(&led->state, 0); } } @@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev) led = &dev->wl->leds.led_tx; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = 0; + led->hw_state = false; atomic_set(&led->state, 0); } led = &dev->wl->leds.led_rx; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = 0; + led->hw_state = false; atomic_set(&led->state, 0); } led = &dev->wl->leds.led_assoc; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = 0; + led->hw_state = false; atomic_set(&led->state, 0); } diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c index 4c82d582a52..916123a3d74 100644 --- a/drivers/net/wireless/b43/lo.c +++ b/drivers/net/wireless/b43/lo.c @@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) const struct b43_rfatt *rfatt; const struct b43_bbatt *bbatt; u64 power_vector; - bool table_changed = 0; + bool table_changed = false; BUILD_BUG_ON(B43_DC_LT_SIZE != 32); B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64); @@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00) | (val & 0x00FF); } - table_changed 
= 1; + table_changed = true; } if (table_changed) { /* The table changed in memory. Update the hardware table. */ @@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev) unsigned long now; unsigned long expire; struct b43_lo_calib *cal, *tmp; - bool current_item_expired = 0; + bool current_item_expired = false; bool hwpctl; if (!lo) @@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev) if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) && b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) { B43_WARN_ON(current_item_expired); - current_item_expired = 1; + current_item_expired = true; } if (b43_debug(dev, B43_DBG_LO)) { b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), " diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5634d9a9965..1c6f19393ef 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); if (ps_flags & B43_PS_ENABLED) { - hwps = 1; + hwps = true; } else if (ps_flags & B43_PS_DISABLED) { - hwps = 0; + hwps = false; } else { //TODO: If powersave is not off and FIXME is not set and we are not in adhoc // and thus is not an AP and we are associated, set bit 25 } if (ps_flags & B43_PS_AWAKE) { - awake = 1; + awake = true; } else if (ps_flags & B43_PS_ASLEEP) { - awake = 0; + awake = false; } else { //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, // or we are associated, or FIXME, or the latest PS-Poll packet sent was @@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) } /* FIXME: For now we force awake-on and hwps-off */ - hwps = 0; - awake = 1; + hwps = false; + awake = true; macctl = b43_read32(dev, B43_MMIO_MACCTL); if (hwps) @@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev) return; if (dev->noisecalc.calculation_running) return; - dev->noisecalc.calculation_running = 1; + dev->noisecalc.calculation_running = true; dev->noisecalc.nr_samples = 0; b43_generate_noise_sample(dev); @@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev) average -= 48; dev->stats.link_noise = average; - dev->noisecalc.calculation_running = 0; + dev->noisecalc.calculation_running = false; return; } generate_new: @@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev) b43_power_saving_ctl_bits(dev, 0); } if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) - dev->dfq_valid = 1; + dev->dfq_valid = true; } static void handle_irq_atim_end(struct b43_wldev *dev) @@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev) b43_write32(dev, B43_MMIO_MACCMD, b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_DFQ_VALID); - dev->dfq_valid = 0; + dev->dfq_valid = false; } } @@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev, unsigned int i, len, variable_len; const struct ieee80211_mgmt *bcn; const u8 *ie; - bool tim_found = 0; + bool tim_found = false; unsigned int rate; u16 ctl; int antenna; @@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev, /* A valid TIM is at least 4 bytes long. 
*/ if (ie_len < 4) break; - tim_found = 1; + tim_found = true; tim_position = sizeof(struct b43_plcp_hdr6); tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable); @@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev) if (wl->beacon0_uploaded) return; b43_write_beacon_template(dev, 0x68, 0x18); - wl->beacon0_uploaded = 1; + wl->beacon0_uploaded = true; } static void b43_upload_beacon1(struct b43_wldev *dev) @@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev) if (wl->beacon1_uploaded) return; b43_write_beacon_template(dev, 0x468, 0x1A); - wl->beacon1_uploaded = 1; + wl->beacon1_uploaded = true; } static void handle_irq_beacon(struct b43_wldev *dev) @@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev) if (unlikely(wl->beacon_templates_virgin)) { /* We never uploaded a beacon before. * Upload both templates now, but only mark one valid. */ - wl->beacon_templates_virgin = 0; + wl->beacon_templates_virgin = false; b43_upload_beacon0(dev); b43_upload_beacon1(dev); cmd = b43_read32(dev, B43_MMIO_MACCMD); @@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl) if (wl->current_beacon) dev_kfree_skb_any(wl->current_beacon); wl->current_beacon = beacon; - wl->beacon0_uploaded = 0; - wl->beacon1_uploaded = 0; + wl->beacon0_uploaded = false; + wl->beacon1_uploaded = false; ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); } @@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) b43err(dev->wl, "This device does not support DMA " "on your system. It will now be switched to PIO.\n"); /* Fall back to PIO transfers if we get fatal DMA errors! */ - dev->use_pio = 1; + dev->use_pio = true; b43_controller_restart(dev, "DMA error"); return; } @@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) filename = NULL; else goto err_no_pcm; - fw->pcm_request_failed = 0; + fw->pcm_request_failed = false; err = b43_do_request_fw(ctx, filename, &fw->pcm); if (err == -ENOENT) { /* We did not find a PCM file? Not fatal, but * core rev <= 10 must do without hwcrypto then. */ - fw->pcm_request_failed = 1; + fw->pcm_request_failed = true; } else if (err) goto err_load; @@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues; dev->qos_enabled = !!modparam_qos; /* Default to firmware/hardware crypto acceleration. */ - dev->hwcrypto_enabled = 1; + dev->hwcrypto_enabled = true; if (dev->fw.opensource) { u16 fwcapa; @@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) { b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n"); /* Disable hardware crypto and fall back to software crypto. */ - dev->hwcrypto_enabled = 0; + dev->hwcrypto_enabled = false; } if (!(fwcapa & B43_FWCAPA_QOS)) { b43info(dev->wl, "QoS not supported by firmware\n"); @@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) * ieee80211_unregister to make sure the networking core can * properly free possible resources. 
*/ dev->wl->hw->queues = 1; - dev->qos_enabled = 0; + dev->qos_enabled = false; } } else { b43info(dev->wl, "Loading firmware version %u.%u " @@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl) wl->rng.name = wl->rng_name; wl->rng.data_read = b43_rng_read; wl->rng.priv = (unsigned long)wl; - wl->rng_initialized = 1; + wl->rng_initialized = true; err = hwrng_register(&wl->rng); if (err) { - wl->rng_initialized = 0; + wl->rng_initialized = false; b43err(wl, "Failed to register the random " "number generator (%d)\n", err); } @@ -3378,6 +3378,7 @@ static void b43_tx_work(struct work_struct *work) struct b43_wl *wl = container_of(work, struct b43_wl, tx_work); struct b43_wldev *dev; struct sk_buff *skb; + int queue_num; int err = 0; mutex_lock(&wl->mutex); @@ -3387,15 +3388,26 @@ static void b43_tx_work(struct work_struct *work) return; } - while (skb_queue_len(&wl->tx_queue)) { - skb = skb_dequeue(&wl->tx_queue); + for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { + while (skb_queue_len(&wl->tx_queue[queue_num])) { + skb = skb_dequeue(&wl->tx_queue[queue_num]); + if (b43_using_pio_transfers(dev)) + err = b43_pio_tx(dev, skb); + else + err = b43_dma_tx(dev, skb); + if (err == -ENOSPC) { + wl->tx_queue_stopped[queue_num] = 1; + ieee80211_stop_queue(wl->hw, queue_num); + skb_queue_head(&wl->tx_queue[queue_num], skb); + break; + } + if (unlikely(err)) + dev_kfree_skb(skb); /* Drop it */ + err = 0; + } - if (b43_using_pio_transfers(dev)) - err = b43_pio_tx(dev, skb); - else - err = b43_dma_tx(dev, skb); - if (unlikely(err)) - dev_kfree_skb(skb); /* Drop it */ + if (!err) + wl->tx_queue_stopped[queue_num] = 0; } #if B43_DEBUG @@ -3416,8 +3428,12 @@ static void b43_op_tx(struct ieee80211_hw *hw, } B43_WARN_ON(skb_shinfo(skb)->nr_frags); - skb_queue_tail(&wl->tx_queue, skb); - ieee80211_queue_work(wl->hw, &wl->tx_work); + skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb); + if (!wl->tx_queue_stopped[skb->queue_mapping]) { + ieee80211_queue_work(wl->hw, &wl->tx_work); + } else { + ieee80211_stop_queue(wl->hw, skb->queue_mapping); + } } static void b43_qos_params_upload(struct b43_wldev *dev, @@ -3702,13 +3718,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) case IEEE80211_BAND_5GHZ: if (d->phy.supports_5ghz) { up_dev = d; - gmode = 0; + gmode = false; } break; case IEEE80211_BAND_2GHZ: if (d->phy.supports_2ghz) { up_dev = d; - gmode = 1; + gmode = true; } break; default: @@ -4147,6 +4163,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) struct b43_wl *wl; struct b43_wldev *orig_dev; u32 mask; + int queue_num; if (!dev) return NULL; @@ -4199,9 +4216,11 @@ redo: mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); B43_WARN_ON(mask != 0xFFFFFFFF && mask); - /* Drain the TX queue */ - while (skb_queue_len(&wl->tx_queue)) - dev_kfree_skb(skb_dequeue(&wl->tx_queue)); + /* Drain all TX queues. */ + for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { + while (skb_queue_len(&wl->tx_queue[queue_num])) + dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); + } b43_mac_suspend(dev); b43_leds_exit(dev); @@ -4425,18 +4444,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev, atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); #if B43_DEBUG - phy->phy_locked = 0; - phy->radio_locked = 0; + phy->phy_locked = false; + phy->radio_locked = false; #endif } static void setup_struct_wldev_for_init(struct b43_wldev *dev) { - dev->dfq_valid = 0; + dev->dfq_valid = false; /* Assume the radio is enabled. 
If it's not enabled, the state will * immediately get fixed on the first periodic work run. */ - dev->radio_hw_enable = 1; + dev->radio_hw_enable = true; /* Stats */ memset(&dev->stats, 0, sizeof(dev->stats)); @@ -4670,16 +4689,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev) if (b43_bus_host_is_pcmcia(dev->dev) || b43_bus_host_is_sdio(dev->dev)) { - dev->__using_pio_transfers = 1; + dev->__using_pio_transfers = true; err = b43_pio_init(dev); } else if (dev->use_pio) { b43warn(dev->wl, "Forced PIO by use_pio module parameter. " "This should not be needed and will result in lower " "performance.\n"); - dev->__using_pio_transfers = 1; + dev->__using_pio_transfers = true; err = b43_pio_init(dev); } else { - dev->__using_pio_transfers = 0; + dev->__using_pio_transfers = false; err = b43_dma_init(dev); } if (err) @@ -4733,7 +4752,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw, b43dbg(wl, "Adding Interface type %d\n", vif->type); dev = wl->current_dev; - wl->operating = 1; + wl->operating = true; wl->vif = vif; wl->if_type = vif->type; memcpy(wl->mac_addr, vif->addr, ETH_ALEN); @@ -4767,7 +4786,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw, B43_WARN_ON(wl->vif != vif); wl->vif = NULL; - wl->operating = 0; + wl->operating = false; b43_adjust_opmode(dev); memset(wl->mac_addr, 0, ETH_ALEN); @@ -4789,12 +4808,12 @@ static int b43_op_start(struct ieee80211_hw *hw) memset(wl->bssid, 0, ETH_ALEN); memset(wl->mac_addr, 0, ETH_ALEN); wl->filter_flags = 0; - wl->radiotap_enabled = 0; + wl->radiotap_enabled = false; b43_qos_clear(wl); - wl->beacon0_uploaded = 0; - wl->beacon1_uploaded = 0; - wl->beacon_templates_virgin = 1; - wl->radio_enabled = 1; + wl->beacon0_uploaded = false; + wl->beacon1_uploaded = false; + wl->beacon_templates_virgin = true; + wl->radio_enabled = true; mutex_lock(&wl->mutex); @@ -4840,7 +4859,7 @@ static void b43_op_stop(struct ieee80211_hw *hw) goto out_unlock; } b43_wireless_core_exit(dev); - wl->radio_enabled = 0; + wl->radio_enabled = false; out_unlock: mutex_unlock(&wl->mutex); @@ -5028,7 +5047,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) struct pci_dev *pdev = NULL; int err; u32 tmp; - bool have_2ghz_phy = 0, have_5ghz_phy = 0; + bool have_2ghz_phy = false, have_5ghz_phy = false; /* Do NOT do any device initialization here. * Do it in wireless_core_init() instead. @@ -5071,7 +5090,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) } dev->phy.gmode = have_2ghz_phy; - dev->phy.radio_on = 1; + dev->phy.radio_on = true; b43_wireless_core_reset(dev, dev->phy.gmode); err = b43_phy_versioning(dev); @@ -5082,11 +5101,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) (pdev->device != 0x4312 && pdev->device != 0x4319 && pdev->device != 0x4324)) { /* No multiband support. */ - have_2ghz_phy = 0; - have_5ghz_phy = 0; + have_2ghz_phy = false; + have_5ghz_phy = false; switch (dev->phy.type) { case B43_PHYTYPE_A: - have_5ghz_phy = 1; + have_5ghz_phy = true; break; case B43_PHYTYPE_LP: //FIXME not always! #if 0 //FIXME enabling 5GHz causes a NULL pointer dereference @@ -5096,7 +5115,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) case B43_PHYTYPE_N: case B43_PHYTYPE_HT: case B43_PHYTYPE_LCN: - have_2ghz_phy = 1; + have_2ghz_phy = true; break; default: B43_WARN_ON(1); @@ -5112,8 +5131,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) /* FIXME: For now we disable the A-PHY on multi-PHY devices. 
*/ if (dev->phy.type != B43_PHYTYPE_N && dev->phy.type != B43_PHYTYPE_LP) { - have_2ghz_phy = 1; - have_5ghz_phy = 0; + have_2ghz_phy = true; + have_5ghz_phy = false; } } @@ -5245,6 +5264,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) struct ieee80211_hw *hw; struct b43_wl *wl; char chip_name[6]; + int queue_num; hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); if (!hw) { @@ -5264,7 +5284,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) BIT(NL80211_IFTYPE_WDS) | BIT(NL80211_IFTYPE_ADHOC); - hw->queues = modparam_qos ? 4 : 1; + hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; wl->mac80211_initially_registered_queues = hw->queues; hw->max_rates = 2; SET_IEEE80211_DEV(hw, dev->dev); @@ -5281,7 +5301,12 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); INIT_WORK(&wl->tx_work, b43_tx_work); - skb_queue_head_init(&wl->tx_queue); + + /* Initialize queues and flags. */ + for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { + skb_queue_head_init(&wl->tx_queue[queue_num]); + wl->tx_queue_stopped[queue_num] = 0; + } snprintf(chip_name, ARRAY_SIZE(chip_name), (dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id); diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 3ea44bb0368..3f8883b14d9 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev) #if B43_DEBUG B43_WARN_ON(dev->phy.radio_locked); - dev->phy.radio_locked = 1; + dev->phy.radio_locked = true; #endif macctl = b43_read32(dev, B43_MMIO_MACCTL); @@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev) #if B43_DEBUG B43_WARN_ON(!dev->phy.radio_locked); - dev->phy.radio_locked = 0; + dev->phy.radio_locked = false; #endif /* Commit any write */ @@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev) { #if B43_DEBUG B43_WARN_ON(dev->phy.phy_locked); - dev->phy.phy_locked = 1; + dev->phy.phy_locked = true; #endif B43_WARN_ON(dev->dev->core_rev < 3); @@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev) { #if B43_DEBUG B43_WARN_ON(!dev->phy.phy_locked); - dev->phy.phy_locked = 0; + dev->phy.phy_locked = false; #endif B43_WARN_ON(dev->dev->core_rev < 3); diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 8e157bc213f..12f467b8d56 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) if (b43_phy_read(dev, 0x0033) & 0x0800) break; - gphy->aci_enable = 1; + gphy->aci_enable = true; phy_stacksave(B43_PHY_RADIO_BITFIELD); phy_stacksave(B43_PHY_G_CRS); @@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) if (!(b43_phy_read(dev, 0x0033) & 0x0800)) break; - gphy->aci_enable = 0; + gphy->aci_enable = false; phy_stackrestore(B43_PHY_RADIO_BITFIELD); phy_stackrestore(B43_PHY_G_CRS); @@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev) bbatt.att = 11; if (phy->radio_rev == 8) { rfatt.att = 15; - rfatt.with_padmix = 1; + rfatt.with_padmix = true; } else { rfatt.att = 9; - rfatt.with_padmix = 0; + rfatt.with_padmix = false; } b43_set_txpower_g(dev, &bbatt, &rfatt, 0); } @@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev, struct 
b43_bus_dev *bdev = dev->dev; struct b43_phy *phy = &dev->phy; - rf->with_padmix = 0; + rf->with_padmix = false; if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM && dev->dev->board_type == SSB_BOARD_BCM4309G) { @@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev, return; case 8: rf->att = 0xA; - rf->with_padmix = 1; + rf->with_padmix = true; return; case 9: default: @@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) B43_WARN_ON((dev->dev->chip_id == 0x4301) && (phy->radio_ver != 0x2050)); /* Not supported anymore */ - gphy->dyn_tssi_tbl = 0; + gphy->dyn_tssi_tbl = false; if (pab0 != 0 && pab1 != 0 && pab2 != 0 && pab0 != -1 && pab1 != -1 && pab2 != -1) { @@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) pab1, pab2); if (!gphy->tssi2dbm) return -ENOMEM; - gphy->dyn_tssi_tbl = 1; + gphy->dyn_tssi_tbl = true; } else { /* pabX values not set in SPROM. */ gphy->tgt_idle_tssi = 52; @@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev) if (gphy->dyn_tssi_tbl) kfree(gphy->tssi2dbm); - gphy->dyn_tssi_tbl = 0; + gphy->dyn_tssi_tbl = false; gphy->tssi2dbm = NULL; kfree(gphy); @@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev) if (phy->rev == 1) { /* Workaround: Temporarly disable gmode through the early init * phase, as the gmode stuff is not needed for phy rev 1 */ - phy->gmode = 0; + phy->gmode = false; b43_wireless_core_reset(dev, 0); b43_phy_initg(dev); - phy->gmode = 1; + phy->gmode = true; b43_wireless_core_reset(dev, 1); } @@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev, gphy->radio_off_context.rfover); b43_phy_write(dev, B43_PHY_RFOVERVAL, gphy->radio_off_context.rfoverval); - gphy->radio_off_context.valid = 0; + gphy->radio_off_context.valid = false; } channel = phy->channel; b43_gphy_channel_switch(dev, 6, 1); @@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev, rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); gphy->radio_off_context.rfover = rfover; gphy->radio_off_context.rfoverval = rfoverval; - gphy->radio_off_context.valid = 1; + gphy->radio_off_context.valid = true; b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C); b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73); } @@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev, if ((phy->rev == 0) || (!phy->gmode)) return -ENODEV; - gphy->aci_wlan_automatic = 0; + gphy->aci_wlan_automatic = false; switch (mode) { case B43_INTERFMODE_AUTOWLAN: - gphy->aci_wlan_automatic = 1; + gphy->aci_wlan_automatic = true; if (gphy->aci_enable) mode = B43_INTERFMODE_MANUALWLAN; else @@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev, b43_radio_interference_mitigation_disable(dev, currentmode); if (mode == B43_INTERFMODE_NONE) { - gphy->aci_enable = 0; - gphy->aci_hw_rssi = 0; + gphy->aci_enable = false; + gphy->aci_hw_rssi = false; } else b43_radio_interference_mitigation_enable(dev, mode); gphy->interfmode = mode; diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c index f93d66b1817..3ae28561f7a 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/b43/phy_lp.c @@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user) struct b43_phy_lp *lpphy = dev->phy.lp; if (user) - lpphy->crs_usr_disable = 1; + lpphy->crs_usr_disable = true; else - lpphy->crs_sys_disable = 1; + 
lpphy->crs_sys_disable = true; b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80); } @@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user) struct b43_phy_lp *lpphy = dev->phy.lp; if (user) - lpphy->crs_usr_disable = 0; + lpphy->crs_usr_disable = false; else - lpphy->crs_sys_disable = 0; + lpphy->crs_sys_disable = false; if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index b17d9b6c33a..bf5a4385535 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -78,19 +78,6 @@ enum b43_nphy_rssi_type { B43_NPHY_RSSI_TBD, }; -/* TODO: reorder functions */ -static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, - bool enable); -static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, - u8 *events, u8 *delays, u8 length); -static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, - enum b43_nphy_rf_sequence seq); -static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, - u16 value, u8 core, bool off); -static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, - u16 value, u8 core); -static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev); - static inline bool b43_nphy_ipa(struct b43_wldev *dev) { enum ieee80211_band band = b43_current_band(dev->wl); @@ -98,57 +85,394 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev) (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)); } -void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) -{//TODO +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ +static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) +{ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (dev->phy.rev >= 6) { + if (dev->dev->chip_id == 47162) + return txpwrctrl_tx_gain_ipa_rev5; + return txpwrctrl_tx_gain_ipa_rev6; + } else if (dev->phy.rev >= 5) { + return txpwrctrl_tx_gain_ipa_rev5; + } else { + return txpwrctrl_tx_gain_ipa; + } + } else { + return txpwrctrl_tx_gain_ipa_5g; + } } -static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) -{//TODO +/************************************************** + * RF (just without b43_nphy_rf_control_intc_override) + **************************************************/ + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ +static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, + enum b43_nphy_rf_sequence seq) +{ + static const u16 trigger[] = { + [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, + [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, + [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, + [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, + [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, + [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, + }; + int i; + u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); + + B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); + + b43_phy_set(dev, B43_NPHY_RFSEQMODE, + B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); + b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); + for (i = 0; i < 200; i++) { + if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) + goto ok; + msleep(1); + } + b43err(dev->wl, "RF sequence status timeout\n"); +ok: + b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } -static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, - bool ignore_tssi) -{//TODO - return B43_TXPWR_RES_DONE; +/* 
http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ +static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, + u16 value, u8 core, bool off) +{ + int i; + u8 index = fls(field); + u8 addr, en_addr, val_addr; + /* we expect only one bit set */ + B43_WARN_ON(field & (~(1 << (index - 1)))); + + if (dev->phy.rev >= 3) { + const struct nphy_rf_control_override_rev3 *rf_ctrl; + for (i = 0; i < 2; i++) { + if (index == 0 || index == 16) { + b43err(dev->wl, + "Unsupported RF Ctrl Override call\n"); + return; + } + + rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; + en_addr = B43_PHY_N((i == 0) ? + rf_ctrl->en_addr0 : rf_ctrl->en_addr1); + val_addr = B43_PHY_N((i == 0) ? + rf_ctrl->val_addr0 : rf_ctrl->val_addr1); + + if (off) { + b43_phy_mask(dev, en_addr, ~(field)); + b43_phy_mask(dev, val_addr, + ~(rf_ctrl->val_mask)); + } else { + if (core == 0 || ((1 << i) & core)) { + b43_phy_set(dev, en_addr, field); + b43_phy_maskset(dev, val_addr, + ~(rf_ctrl->val_mask), + (value << rf_ctrl->val_shift)); + } + } + } + } else { + const struct nphy_rf_control_override_rev2 *rf_ctrl; + if (off) { + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); + value = 0; + } else { + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); + } + + for (i = 0; i < 2; i++) { + if (index <= 1 || index == 16) { + b43err(dev->wl, + "Unsupported RF Ctrl Override call\n"); + return; + } + + if (index == 2 || index == 10 || + (index >= 13 && index <= 15)) { + core = 1; + } + + rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; + addr = B43_PHY_N((i == 0) ? + rf_ctrl->addr0 : rf_ctrl->addr1); + + if ((1 << i) & core) + b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), + (value << rf_ctrl->shift)); + + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + udelay(1); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); + } + } } -static void b43_chantab_radio_upload(struct b43_wldev *dev, - const struct b43_nphy_channeltab_entry_rev2 *e) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ +static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, + u16 value, u8 core) { - b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref); - b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); - b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); - b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ + u8 i, j; + u16 reg, tmp, val; - b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1); - b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2); - b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); - b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ + B43_WARN_ON(dev->phy.rev < 3); + B43_WARN_ON(field > 4); - b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); - b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); - b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); - b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ + for (i = 0; i < 2; i++) { + if ((core == 1 && i == 1) || (core == 2 && !i)) + continue; - b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); - b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); - b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); - b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); - 
b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ + reg = (i == 0) ? + B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; + b43_phy_mask(dev, reg, 0xFBFF); - b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); - b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); - b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); - b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ + switch (field) { + case 0: + b43_phy_write(dev, reg, 0); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); + break; + case 1: + if (!i) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, + 0xFC3F, (value << 6)); + b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, + 0xFFFE, 1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + for (j = 0; j < 100; j++) { + if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) { + j = 0; + break; + } + udelay(10); + } + if (j) + b43err(dev->wl, + "intc override timeout\n"); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, + 0xFFFE); + } else { + b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, + 0xFC3F, (value << 6)); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, + 0xFFFE, 1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_RXTX); + for (j = 0; j < 100; j++) { + if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) { + j = 0; + break; + } + udelay(10); + } + if (j) + b43err(dev->wl, + "intc override timeout\n"); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, + 0xFFFE); + } + break; + case 2: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0020; + val = value << 5; + } else { + tmp = 0x0010; + val = value << 4; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + case 3: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0001; + val = value; + } else { + tmp = 0x0004; + val = value << 2; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + case 4: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0002; + val = value << 1; + } else { + tmp = 0x0008; + val = value << 3; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + } + } +} - b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); - b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); +/************************************************** + * Various PHY ops + **************************************************/ + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +static void b43_nphy_write_clip_detection(struct b43_wldev *dev, + const u16 *clip_st) +{ + b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); + b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) +{ + clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); + clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ +static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) +{ + u16 tmp; + + if (dev->dev->core_rev == 16) + b43_mac_suspend(dev); + + tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); + tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | + B43_NPHY_CLASSCTL_WAITEDEN); + tmp &= ~mask; + tmp |= (val & mask); + b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); + + if (dev->dev->core_rev == 16) + b43_mac_enable(dev); + + return tmp; +} + +/* 
http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ +static void b43_nphy_reset_cca(struct b43_wldev *dev) +{ + u16 bbcfg; + + b43_phy_force_clock(dev, 1); + bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); + b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); + udelay(1); + b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); + b43_phy_force_clock(dev, 0); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ +static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + if (enable) { + static const u16 clip[] = { 0xFFFF, 0xFFFF }; + if (nphy->deaf_count++ == 0) { + nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); + b43_nphy_classifier(dev, 0x7, 0); + b43_nphy_read_clip_detection(dev, nphy->clip_state); + b43_nphy_write_clip_detection(dev, clip); + } + b43_nphy_reset_cca(dev); + } else { + if (--nphy->deaf_count == 0) { + b43_nphy_classifier(dev, 0x7, nphy->classifier_state); + b43_nphy_write_clip_detection(dev, nphy->clip_state); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ +static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + u8 i; + s16 tmp; + u16 data[4]; + s16 gain[2]; + u16 minmax[2]; + static const u16 lna_gain[4] = { -2, 10, 19, 25 }; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + if (nphy->gain_boost) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + gain[0] = 6; + gain[1] = 6; + } else { + tmp = 40370 - 315 * dev->phy.channel; + gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); + tmp = 23242 - 224 * dev->phy.channel; + gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); + } + } else { + gain[0] = 0; + gain[1] = 0; + } + + for (i = 0; i < 2; i++) { + if (nphy->elna_gain_config) { + data[0] = 19 + gain[i]; + data[1] = 25 + gain[i]; + data[2] = 25 + gain[i]; + data[3] = 25 + gain[i]; + } else { + data[0] = lna_gain[0] + gain[i]; + data[1] = lna_gain[1] + gain[i]; + data[2] = lna_gain[2] + gain[i]; + data[3] = lna_gain[3] + gain[i]; + } + b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data); + + minmax[i] = 23 + gain[i]; + } + + b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN, + minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT); + b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN, + minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ +static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, + u8 *events, u8 *delays, u8 length) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u8 end = (dev->phy.rev >= 3) ? 
0x1F : 0x0F; + u16 offset1 = cmd << 4; + u16 offset2 = offset1 + 0x80; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); + + b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); + b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); + + for (i = length; i < 16; i++) { + b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); + b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); +} + +/************************************************** + * Radio 0x2056 + **************************************************/ + static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { @@ -228,10 +552,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, static void b43_radio_2056_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { + struct ssb_sprom *sprom = dev->dev->bus_sprom; + enum ieee80211_band band = b43_current_band(dev->wl); + u16 offset; + u8 i; + u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; + B43_WARN_ON(dev->phy.rev < 3); b43_chantab_radio_2056_upload(dev, e); - /* TODO */ + b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); + + if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && + b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); + if (dev->dev->chip_id == 0x4716) { + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14); + b43_radio_write(dev, B2056_SYN_PLL_CP2, 0); + } else { + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B); + b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14); + } + } + if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05); + b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C); + } + + if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) { + for (i = 0; i < 2; i++) { + offset = i ? B2056_TX1 : B2056_TX0; + if (dev->phy.rev >= 5) { + b43_radio_write(dev, + offset | B2056_TX_PADG_IDAC, 0xcc); + + if (dev->dev->chip_id == 0x4716) { + bias = 0x40; + cbias = 0x45; + pag_boost = 0x5; + pgag_boost = 0x33; + mixg_boost = 0x55; + } else { + bias = 0x25; + cbias = 0x20; + pag_boost = 0x4; + pgag_boost = 0x03; + mixg_boost = 0x65; + } + padg_boost = 0x77; + + b43_radio_write(dev, + offset | B2056_TX_INTPAG_IMAIN_STAT, + bias); + b43_radio_write(dev, + offset | B2056_TX_INTPAG_IAUX_STAT, + bias); + b43_radio_write(dev, + offset | B2056_TX_INTPAG_CASCBIAS, + cbias); + b43_radio_write(dev, + offset | B2056_TX_INTPAG_BOOST_TUNE, + pag_boost); + b43_radio_write(dev, + offset | B2056_TX_PGAG_BOOST_TUNE, + pgag_boost); + b43_radio_write(dev, + offset | B2056_TX_PADG_BOOST_TUNE, + padg_boost); + b43_radio_write(dev, + offset | B2056_TX_MIXG_BOOST_TUNE, + mixg_boost); + } else { + bias = dev->phy.is_40mhz ? 
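/*
 * b43_nphy_set_rf_sequence() above lays each sequence out as two
 * 16-entry byte arrays in N-PHY table 7: events start at cmd * 16,
 * delays at the same offset plus 0x80, and unused slots are padded with
 * an end marker (0x1F on rev >= 3, 0x0F before) and a delay of 1.
 * A table-free sketch of that packing (hypothetical buffers, same
 * indexing; not the driver's table accessors):
 */
#include <stddef.h>
#include <stdint.h>

#define SEQ_SLOTS 16

void pack_rf_sequence(uint8_t *events_tbl, uint8_t *delays_tbl,
                      unsigned int cmd, const uint8_t *events,
                      const uint8_t *delays, size_t len, uint8_t end_marker)
{
        uint8_t *ev = events_tbl + cmd * SEQ_SLOTS;  /* B43_NTAB8(7, cmd << 4) */
        uint8_t *dl = delays_tbl + cmd * SEQ_SLOTS;  /* ... + 0x80 in the driver */
        size_t i;

        for (i = 0; i < len && i < SEQ_SLOTS; i++) {
                ev[i] = events[i];
                dl[i] = delays[i];
        }
        for (; i < SEQ_SLOTS; i++) {
                ev[i] = end_marker;     /* 0x1F (rev >= 3) or 0x0F */
                dl[i] = 1;
        }
}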
0x40 : 0x20; + b43_radio_write(dev, + offset | B2056_TX_INTPAG_IMAIN_STAT, + bias); + b43_radio_write(dev, + offset | B2056_TX_INTPAG_IAUX_STAT, + bias); + b43_radio_write(dev, + offset | B2056_TX_INTPAG_CASCBIAS, + 0x30); + } + b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); + } + } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { + /* TODO */ + } + udelay(50); /* VCO calibration */ b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); @@ -242,285 +654,84 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, udelay(300); } -static void b43_chantab_phy_upload(struct b43_wldev *dev, - const struct b43_phy_n_sfo_cfg *e) -{ - b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); - b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); - b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3); - b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4); - b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5); - b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ -static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) +static void b43_radio_init2056_pre(struct b43_wldev *dev) { - struct b43_phy_n *nphy = dev->phy.n; - u8 i; - u16 bmask, val, tmp; - enum ieee80211_band band = b43_current_band(dev->wl); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - nphy->txpwrctrl = enable; - if (!enable) { - if (dev->phy.rev >= 3 && - (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) & - (B43_NPHY_TXPCTL_CMD_COEFF | - B43_NPHY_TXPCTL_CMD_HWPCTLEN | - B43_NPHY_TXPCTL_CMD_PCTLEN))) { - /* We disable enabled TX pwr ctl, save it's state */ - nphy->tx_pwr_idx[0] = b43_phy_read(dev, - B43_NPHY_C1_TXPCTL_STAT) & 0x7f; - nphy->tx_pwr_idx[1] = b43_phy_read(dev, - B43_NPHY_C2_TXPCTL_STAT) & 0x7f; - } - - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840); - for (i = 0; i < 84; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); - - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40); - for (i = 0; i < 84; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); - - tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; - if (dev->phy.rev >= 3) - tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN; - b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp); - - if (dev->phy.rev >= 3) { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); - } else { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); - } - - if (dev->phy.rev == 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, - ~B43_NPHY_BPHY_CTL3_SCALE, 0x53); - else if (dev->phy.rev < 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, - ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A); - - if (dev->phy.rev < 2 && dev->phy.is_40mhz) - b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW); - } else { - b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, - nphy->adj_pwr_tbl); - b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, - nphy->adj_pwr_tbl); - - bmask = B43_NPHY_TXPCTL_CMD_COEFF | - B43_NPHY_TXPCTL_CMD_HWPCTLEN; - /* wl does useless check for "enable" param here */ - val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; - if (dev->phy.rev >= 3) { - bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN; - if (val) - val |= B43_NPHY_TXPCTL_CMD_PCTLEN; - } - b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val); - - if (band == IEEE80211_BAND_5GHZ) { - b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, - ~B43_NPHY_TXPCTL_CMD_INIT, 0x64); - if (dev->phy.rev > 1) - b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT, - ~B43_NPHY_TXPCTL_INIT_PIDXI1, - 0x64); - } - - if (dev->phy.rev >= 3) { - if (nphy->tx_pwr_idx[0] != 128 && - 
nphy->tx_pwr_idx[1] != 128) { - /* Recover TX pwr ctl state */ - b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, - ~B43_NPHY_TXPCTL_CMD_INIT, - nphy->tx_pwr_idx[0]); - if (dev->phy.rev > 1) - b43_phy_maskset(dev, - B43_NPHY_TXPCTL_INIT, - ~0xff, nphy->tx_pwr_idx[1]); - } - } - - if (dev->phy.rev >= 3) { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100); - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100); - } else { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000); - } - - if (dev->phy.rev == 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b); - else if (dev->phy.rev < 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40); - - if (dev->phy.rev < 2 && dev->phy.is_40mhz) - b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW); - - if (b43_nphy_ipa(dev)) { - b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4); - b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4); - } - } - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + ~B43_NPHY_RFCTL_CMD_CHIP0PU); + /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */ + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_OEPORFORCE); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + ~B43_NPHY_RFCTL_CMD_OEPORFORCE); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_CHIP0PU); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ -static void b43_nphy_tx_power_fix(struct b43_wldev *dev) +static void b43_radio_init2056_post(struct b43_wldev *dev) { - struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = dev->dev->bus_sprom; - - u8 txpi[2], bbmult, i; - u16 tmp, radio_gain, dac_gain; - u16 freq = dev->phy.channel_freq; - u32 txgain; - /* u32 gaintbl; rev3+ */ - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - if (dev->phy.rev >= 3) { - txpi[0] = 40; - txpi[1] = 40; - } else if (sprom->revision < 4) { - txpi[0] = 72; - txpi[1] = 72; - } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - txpi[0] = sprom->txpid2g[0]; - txpi[1] = sprom->txpid2g[1]; - } else if (freq >= 4900 && freq < 5100) { - txpi[0] = sprom->txpid5gl[0]; - txpi[1] = sprom->txpid5gl[1]; - } else if (freq >= 5100 && freq < 5500) { - txpi[0] = sprom->txpid5g[0]; - txpi[1] = sprom->txpid5g[1]; - } else if (freq >= 5500) { - txpi[0] = sprom->txpid5gh[0]; - txpi[1] = sprom->txpid5gh[1]; - } else { - txpi[0] = 91; - txpi[1] = 91; - } - } - + b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB); + b43_radio_set(dev, B2056_SYN_COM_PU, 0x2); + b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2); + msleep(1); + b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); + b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); + b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); /* - for (i = 0; i < 2; i++) { - nphy->txpwrindex[i].index_internal = txpi[i]; - nphy->txpwrindex[i].index_internal_save = txpi[i]; - } + if (nphy->init_por) + Call Radio 2056 Recalibrate */ +} - for (i = 0; i < 2; i++) { - if (dev->phy.rev >= 3) { - /* FIXME: support 5GHz */ - txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; - radio_gain = (txgain >> 16) & 0x1FFFF; - } else { - txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]]; - radio_gain = (txgain >> 16) & 0x1FFF; - } - - dac_gain = (txgain >> 8) & 0x3F; - bbmult = txgain & 0xFF; - - if (dev->phy.rev >= 3) { - if (i == 0) - b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); - else - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); - } else { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); - } - - if (i == 0) - b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain); - else - 
b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain); - - b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain); - - tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57)); - if (i == 0) - tmp = (tmp & 0x00FF) | (bbmult << 8); - else - tmp = (tmp & 0xFF00) | bbmult; - b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp); - - if (b43_nphy_ipa(dev)) { - u32 tmp32; - u16 reg = (i == 0) ? - B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1; - tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i])); - b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4); - b43_phy_set(dev, reg, 0x4); - } - } - - b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); +/* + * Initialize a Broadcom 2056 N-radio + * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init + */ +static void b43_radio_init2056(struct b43_wldev *dev) +{ + b43_radio_init2056_pre(dev); + b2056_upload_inittabs(dev, 0, 0); + b43_radio_init2056_post(dev); } -static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) +/************************************************** + * Radio 0x2055 + **************************************************/ + +static void b43_chantab_radio_upload(struct b43_wldev *dev, + const struct b43_nphy_channeltab_entry_rev2 *e) { - struct b43_phy *phy = &dev->phy; + b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref); + b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); + b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); + b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - const u32 *table = NULL; -#if 0 - TODO: b43_ntab_papd_pga_gain_delta_ipa_2* - u32 rfpwr_offset; - u8 pga_gain; - int i; -#endif + b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1); + b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2); + b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); + b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - if (phy->rev >= 3) { - if (b43_nphy_ipa(dev)) { - table = b43_nphy_get_ipa_gain_table(dev); - } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - if (phy->rev == 3) - table = b43_ntab_tx_gain_rev3_5ghz; - if (phy->rev == 4) - table = b43_ntab_tx_gain_rev4_5ghz; - else - table = b43_ntab_tx_gain_rev5plus_5ghz; - } else { - table = b43_ntab_tx_gain_rev3plus_2ghz; - } - } - } else { - table = b43_ntab_tx_gain_rev0_1_2; - } - b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table); - b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table); + b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); + b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); + b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); + b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - if (phy->rev >= 3) { -#if 0 - nphy->gmval = (table[0] >> 16) & 0x7000; + b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); + b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); + b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); + b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - for (i = 0; i < 128; i++) { - pga_gain = (table[i] >> 24) & 0xF; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) - rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain]; - else - rfpwr_offset = 
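/*
 * b43_chantab_radio_upload() above issues a dummy read of
 * B43_MMIO_MACCTL ("flush writes") after every few radio writes.  That
 * is the usual posted-write flush: a read on the same bus forces the
 * preceding writes to complete before execution continues.  The idiom
 * in a generic form (a volatile pointer standing in for the ioread
 * accessor; names are illustrative only):
 */
#include <stdint.h>

static inline void mmio_flush_writes(volatile const uint32_t *flush_reg)
{
        (void)*flush_reg;   /* value discarded; only the read matters */
}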
b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain]; - b43_ntab_write(dev, B43_NTAB32(26, 576 + i), - rfpwr_offset); - b43_ntab_write(dev, B43_NTAB32(27, 576 + i), - rfpwr_offset); - } -#endif - } + b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); + b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); + b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); + b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ + + b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); + b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ @@ -622,1089 +833,9 @@ static void b43_radio_init2055(struct b43_wldev *dev) b43_radio_init2055_post(dev); } -static void b43_radio_init2056_pre(struct b43_wldev *dev) -{ - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - ~B43_NPHY_RFCTL_CMD_CHIP0PU); - /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */ - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_OEPORFORCE); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - ~B43_NPHY_RFCTL_CMD_OEPORFORCE); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_CHIP0PU); -} - -static void b43_radio_init2056_post(struct b43_wldev *dev) -{ - b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB); - b43_radio_set(dev, B2056_SYN_COM_PU, 0x2); - b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2); - msleep(1); - b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); - b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); - b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); - /* - if (nphy->init_por) - Call Radio 2056 Recalibrate - */ -} - -/* - * Initialize a Broadcom 2056 N-radio - * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init - */ -static void b43_radio_init2056(struct b43_wldev *dev) -{ - b43_radio_init2056_pre(dev); - b2056_upload_inittabs(dev, 0, 0); - b43_radio_init2056_post(dev); -} - -/* - * Upload the N-PHY tables. 
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables - */ -static void b43_nphy_tables_init(struct b43_wldev *dev) -{ - if (dev->phy.rev < 3) - b43_nphy_rev0_1_2_tables_init(dev); - else - b43_nphy_rev3plus_tables_init(dev); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ -static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) -{ - struct b43_phy_n *nphy = dev->phy.n; - enum ieee80211_band band; - u16 tmp; - - if (!enable) { - nphy->rfctrl_intc1_save = b43_phy_read(dev, - B43_NPHY_RFCTL_INTC1); - nphy->rfctrl_intc2_save = b43_phy_read(dev, - B43_NPHY_RFCTL_INTC2); - band = b43_current_band(dev->wl); - if (dev->phy.rev >= 3) { - if (band == IEEE80211_BAND_5GHZ) - tmp = 0x600; - else - tmp = 0x480; - } else { - if (band == IEEE80211_BAND_5GHZ) - tmp = 0x180; - else - tmp = 0x120; - } - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp); - } else { - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, - nphy->rfctrl_intc1_save); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, - nphy->rfctrl_intc2_save); - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */ -static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev) -{ - u16 tmp; - - if (dev->phy.rev >= 3) { - if (b43_nphy_ipa(dev)) { - tmp = 4; - b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2, - (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); - } - - tmp = 1; - b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2, - (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ -static void b43_nphy_reset_cca(struct b43_wldev *dev) -{ - u16 bbcfg; - - b43_phy_force_clock(dev, 1); - bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); - b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); - udelay(1); - b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); - b43_phy_force_clock(dev, 0); - b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ -static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) -{ - u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); - - mimocfg |= B43_NPHY_MIMOCFG_AUTO; - if (preamble == 1) - mimocfg |= B43_NPHY_MIMOCFG_GFMIX; - else - mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX; - - b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ -static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - - bool override = false; - u16 chain = 0x33; - - if (nphy->txrx_chain == 0) { - chain = 0x11; - override = true; - } else if (nphy->txrx_chain == 1) { - chain = 0x22; - override = true; - } - - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, - ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN), - chain); - - if (override) - b43_phy_set(dev, B43_NPHY_RFSEQMODE, - B43_NPHY_RFSEQMODE_CAOVER); - else - b43_phy_mask(dev, B43_NPHY_RFSEQMODE, - ~B43_NPHY_RFSEQMODE_CAOVER); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ -static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, - u16 samps, u8 time, bool wait) -{ - int i; - u16 tmp; - - b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps); - b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time); - if (wait) - b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE); - else - b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE); - - b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START); - - for (i = 1000; i; i--) { - tmp 
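/*
 * b43_nphy_pa_override() in the block removed above follows a
 * save/override/restore shape: on "disable" it snapshots both
 * RFCTL_INTC registers and writes a band- and revision-dependent
 * constant (0x600/0x480 on rev >= 3, 0x180/0x120 before); on "enable"
 * it writes the snapshots back.  A compact standalone rendering of that
 * state machine -- the 'overridden' flag is an addition for the sketch,
 * the driver itself trusts its callers to pair the calls, and the
 * accessor callbacks are hypothetical:
 */
#include <stdbool.h>
#include <stdint.h>

struct override_state {
        uint16_t saved[2];
        bool     overridden;
};

void pa_override(struct override_state *st, bool enable, uint16_t forced_val,
                 uint16_t (*rd)(int idx), void (*wr)(int idx, uint16_t val))
{
        int i;

        if (!enable && !st->overridden) {        /* take over: save, then force */
                for (i = 0; i < 2; i++) {
                        st->saved[i] = rd(i);
                        wr(i, forced_val);
                }
                st->overridden = true;
        } else if (enable && st->overridden) {   /* hand the saved values back */
                for (i = 0; i < 2; i++)
                        wr(i, st->saved[i]);
                st->overridden = false;
        }
}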
= b43_phy_read(dev, B43_NPHY_IQEST_CMD); - if (!(tmp & B43_NPHY_IQEST_CMD_START)) { - est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0); - est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0); - est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0); - - est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1); - est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1); - est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1); - return; - } - udelay(10); - } - memset(est, 0, sizeof(*est)); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ -static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, - struct b43_phy_n_iq_comp *pcomp) -{ - if (write) { - b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0); - b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0); - b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1); - b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1); - } else { - pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0); - pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0); - pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1); - pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1); - } -} - -#if 0 -/* Ready but not used anywhere */ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ -static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) -{ - u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; - - b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]); - if (core == 0) { - b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]); - } else { - b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]); - } - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]); - b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]); - b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]); - b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]); - b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]); - b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); - b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ -static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) -{ - u8 rxval, txval; - u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; - - regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA); - if (core == 0) { - regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); - regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); - } else { - regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); - regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); - } - regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); - regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); - regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); - regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); - regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1); - regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); - regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); - regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); - - b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); - b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); - - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, - 
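/*
 * The b43_nphy_rx_iq_est() helper removed here starts a hardware IQ
 * estimate and then polls the command register until the START bit
 * self-clears, reading the accumulators on success and zeroing the
 * whole result on timeout.  A generic poll-until-clear helper with the
 * same shape (read_cmd/read_result are hypothetical callbacks standing
 * in for b43_phy_read() and the accumulator reads):
 */
#include <stdint.h>
#include <string.h>

struct iq_est { uint32_t i_pwr, q_pwr, iq_prod; };

int poll_iq_estimate(uint16_t (*read_cmd)(void),
                     void (*read_result)(struct iq_est *out),
                     uint16_t start_bit, struct iq_est *out)
{
        int tries;

        for (tries = 1000; tries; tries--) {
                if (!(read_cmd() & start_bit)) {  /* engine finished */
                        read_result(out);
                        return 0;
                }
                /* the driver waits udelay(10) between polls */
        }
        memset(out, 0, sizeof(*out));             /* timeout: report zeros */
        return -1;
}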
~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, - ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, - ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, - (core << B43_NPHY_RFSEQCA_RXEN_SHIFT)); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS, - (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT)); - - if (core == 0) { - b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007); - b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007); - } else { - b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007); - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); - } - - b43_nphy_rf_control_intc_override(dev, 2, 0, 3); - b43_nphy_rf_control_override(dev, 8, 0, 3, false); - b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); - - if (core == 0) { - rxval = 1; - txval = 8; - } else { - rxval = 4; - txval = 2; - } - b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); - b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); -} -#endif - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ -static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) -{ - int i; - s32 iq; - u32 ii; - u32 qq; - int iq_nbits, qq_nbits; - int arsh, brsh; - u16 tmp, a, b; - - struct nphy_iq_est est; - struct b43_phy_n_iq_comp old; - struct b43_phy_n_iq_comp new = { }; - bool error = false; - - if (mask == 0) - return; - - b43_nphy_rx_iq_coeffs(dev, false, &old); - b43_nphy_rx_iq_coeffs(dev, true, &new); - b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false); - new = old; - - for (i = 0; i < 2; i++) { - if (i == 0 && (mask & 1)) { - iq = est.iq0_prod; - ii = est.i0_pwr; - qq = est.q0_pwr; - } else if (i == 1 && (mask & 2)) { - iq = est.iq1_prod; - ii = est.i1_pwr; - qq = est.q1_pwr; - } else { - continue; - } - - if (ii + qq < 2) { - error = true; - break; - } - - iq_nbits = fls(abs(iq)); - qq_nbits = fls(qq); - - arsh = iq_nbits - 20; - if (arsh >= 0) { - a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh))); - tmp = ii >> arsh; - } else { - a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh))); - tmp = ii << -arsh; - } - if (tmp == 0) { - error = true; - break; - } - a /= tmp; - - brsh = qq_nbits - 11; - if (brsh >= 0) { - b = (qq << (31 - qq_nbits)); - tmp = ii >> brsh; - } else { - b = (qq << (31 - qq_nbits)); - tmp = ii << -brsh; - } - if (tmp == 0) { - error = true; - break; - } - b = int_sqrt(b / tmp - a * a) - (1 << 10); - - if (i == 0 && (mask & 0x1)) { - if (dev->phy.rev >= 3) { - new.a0 = a & 0x3FF; - new.b0 = b & 0x3FF; - } else { - new.a0 = b & 0x3FF; - new.b0 = a & 0x3FF; - } - } else if (i == 1 && (mask & 0x2)) { - if (dev->phy.rev >= 3) { - new.a1 = a & 0x3FF; - new.b1 = b & 0x3FF; - } else { - new.a1 = b & 0x3FF; - new.b1 = a & 0x3FF; - } - } - } - - if (error) - new = old; - - b43_nphy_rx_iq_coeffs(dev, true, &new); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ -static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev) -{ - u16 array[4]; - b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array); - - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ -static void b43_nphy_write_clip_detection(struct b43_wldev *dev, - const u16 *clip_st) -{ - b43_phy_write(dev, 
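/*
 * The fixed-point shifts in b43_nphy_calc_rx_iq_comp() above are easier
 * to follow in floating point.  Reading the shift amounts, the
 * coefficients appear to carry about 10 fractional bits, i.e. roughly
 * a ~= -1024 * iq / ii and b ~= 1024 * (sqrt(qq / ii - (iq / ii)^2) - 1),
 * with the results clamped to 10 bits and a/b swapped on rev < 3.  This
 * is an interpretation of the integer code, not something stated in the
 * patch; a floating-point reference sketch:
 */
#include <math.h>
#include <stdint.h>

int calc_iq_comp_float(double ii, double qq, double iq,
                       int16_t *a_out, int16_t *b_out)
{
        double r, a, b;

        if (ii + qq < 2.0)
                return -1;          /* too little signal: keep the old values */

        r = iq / ii;                /* I/Q correlation relative to I power */
        a = -1024.0 * r;
        b = 1024.0 * (sqrt(qq / ii - r * r) - 1.0);

        *a_out = (int16_t)a;
        *b_out = (int16_t)b;
        return 0;
}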
B43_NPHY_C1_CLIP1THRES, clip_st[0]); - b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ -static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) -{ - clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); - clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ -static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) -{ - if (dev->phy.rev >= 3) { - if (!init) - return; - if (0 /* FIXME */) { - b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211); - b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222); - b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144); - b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188); - } - } else { - b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); - b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); - - switch (dev->dev->bus_type) { -#ifdef CONFIG_B43_BCMA - case B43_BUS_BCMA: - bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, - 0xFC00, 0xFC00); - break; -#endif -#ifdef CONFIG_B43_SSB - case B43_BUS_SSB: - ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco, - 0xFC00, 0xFC00); - break; -#endif - } - - b43_write32(dev, B43_MMIO_MACCTL, - b43_read32(dev, B43_MMIO_MACCTL) & - ~B43_MACCTL_GPOUTSMSK); - b43_write16(dev, B43_MMIO_GPIO_MASK, - b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00); - b43_write16(dev, B43_MMIO_GPIO_CONTROL, - b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00); - - if (init) { - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); - } - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ -static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) -{ - u16 tmp; - - if (dev->dev->core_rev == 16) - b43_mac_suspend(dev); - - tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); - tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | - B43_NPHY_CLASSCTL_WAITEDEN); - tmp &= ~mask; - tmp |= (val & mask); - b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); - - if (dev->dev->core_rev == 16) - b43_mac_enable(dev); - - return tmp; -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ -static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_n *nphy = phy->n; - - if (enable) { - static const u16 clip[] = { 0xFFFF, 0xFFFF }; - if (nphy->deaf_count++ == 0) { - nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); - b43_nphy_classifier(dev, 0x7, 0); - b43_nphy_read_clip_detection(dev, nphy->clip_state); - b43_nphy_write_clip_detection(dev, clip); - } - b43_nphy_reset_cca(dev); - } else { - if (--nphy->deaf_count == 0) { - b43_nphy_classifier(dev, 0x7, nphy->classifier_state); - b43_nphy_write_clip_detection(dev, nphy->clip_state); - } - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ -static void b43_nphy_stop_playback(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - u16 tmp; - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT); - if (tmp & 0x1) - b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); - else if (tmp & 0x2) - b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); - - b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); - - if (nphy->bb_mult_save & 0x80000000) { - tmp = nphy->bb_mult_save & 
0xFFFF; - b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); - nphy->bb_mult_save = 0; - } - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ -static void b43_nphy_spur_workaround(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - - u8 channel = dev->phy.channel; - int tone[2] = { 57, 58 }; - u32 noise[2] = { 0x3FF, 0x3FF }; - - B43_WARN_ON(dev->phy.rev < 3); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - if (nphy->gband_spurwar_en) { - /* TODO: N PHY Adjust Analog Pfbw (7) */ - if (channel == 11 && dev->phy.is_40mhz) - ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/ - else - ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ - /* TODO: N PHY Adjust CRS Min Power (0x1E) */ - } - - if (nphy->aband_spurwar_en) { - if (channel == 54) { - tone[0] = 0x20; - noise[0] = 0x25F; - } else if (channel == 38 || channel == 102 || channel == 118) { - if (0 /* FIXME */) { - tone[0] = 0x20; - noise[0] = 0x21F; - } else { - tone[0] = 0; - noise[0] = 0; - } - } else if (channel == 134) { - tone[0] = 0x20; - noise[0] = 0x21F; - } else if (channel == 151) { - tone[0] = 0x10; - noise[0] = 0x23F; - } else if (channel == 153 || channel == 161) { - tone[0] = 0x30; - noise[0] = 0x23F; - } else { - tone[0] = 0; - noise[0] = 0; - } - - if (!tone[0] && !noise[0]) - ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/ - else - ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ - } - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ -static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - - u8 i; - s16 tmp; - u16 data[4]; - s16 gain[2]; - u16 minmax[2]; - static const u16 lna_gain[4] = { -2, 10, 19, 25 }; - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - if (nphy->gain_boost) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - gain[0] = 6; - gain[1] = 6; - } else { - tmp = 40370 - 315 * dev->phy.channel; - gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); - tmp = 23242 - 224 * dev->phy.channel; - gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); - } - } else { - gain[0] = 0; - gain[1] = 0; - } - - for (i = 0; i < 2; i++) { - if (nphy->elna_gain_config) { - data[0] = 19 + gain[i]; - data[1] = 25 + gain[i]; - data[2] = 25 + gain[i]; - data[3] = 25 + gain[i]; - } else { - data[0] = lna_gain[0] + gain[i]; - data[1] = lna_gain[1] + gain[i]; - data[2] = lna_gain[2] + gain[i]; - data[3] = lna_gain[3] + gain[i]; - } - b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data); - - minmax[i] = 23 + gain[i]; - } - - b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN, - minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT); - b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN, - minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ -static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = dev->dev->bus_sprom; - - /* PHY rev 0, 1, 2 */ - u8 i, j; - u8 code; - u16 tmp; - u8 rfseq_events[3] = { 6, 8, 7 }; - u8 rfseq_delays[3] = { 10, 30, 1 }; - - /* PHY rev >= 3 */ - bool ghz5; - bool ext_lna; - u16 rssi_gain; - struct nphy_gain_ctl_workaround_entry *e; - u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; - u8 
lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; - - if (dev->phy.rev >= 3) { - /* Prepare values */ - ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) - & B43_NPHY_BANDCTL_5GHZ; - ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA; - e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); - if (ghz5 && dev->phy.rev >= 5) - rssi_gain = 0x90; - else - rssi_gain = 0x50; - - b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); - - /* Set Clip 2 detect */ - b43_phy_set(dev, B43_NPHY_C1_CGAINI, - B43_NPHY_C1_CGAINI_CL2DETECT); - b43_phy_set(dev, B43_NPHY_C2_CGAINI, - B43_NPHY_C2_CGAINI_CL2DETECT); - - b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); - b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); - b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); - b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); - b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, - rssi_gain); - b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, - rssi_gain); - b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); - b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); - - b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); - b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); - b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); - b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); - b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); - - b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); - b43_phy_write(dev, 0x2A7, e->init_gain); - b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, - e->rfseq_init); - b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); - - /* TODO: check defines. 
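/*
 * The gain_boost arithmetic in b43_nphy_adjust_lna_gain_table() (added
 * earlier in this diff, removed from its old location in the block
 * above) computes gain[i] as (tmp >> 13) + ((tmp >> 12) & 1): for
 * non-negative tmp that is tmp / 8192 rounded to the nearest integer,
 * the shifted-out bit 12 being the "half".  A one-file check of that
 * reading:
 */
#include <assert.h>
#include <stdint.h>

int16_t div8192_round(int32_t tmp)
{
        return (int16_t)((tmp >> 13) + ((tmp >> 12) & 1));
}

int main(void)
{
        assert(div8192_round(4095) == 0);   /* 0.4998... rounds down */
        assert(div8192_round(4096) == 1);   /* exactly 0.5 rounds up  */
        assert(div8192_round(8191) == 1);   /* 0.9998... rounds up    */
        return 0;
}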
Do not match variables names */ - b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); - b43_phy_write(dev, 0x2A9, e->cliphi_gain); - b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); - b43_phy_write(dev, 0x2AB, e->clipmd_gain); - b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); - b43_phy_write(dev, 0x2AD, e->cliplo_gain); - - b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); - b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); - b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); - b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); - b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); - b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, - ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); - b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, - ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); - b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); - } else { - /* Set Clip 2 detect */ - b43_phy_set(dev, B43_NPHY_C1_CGAINI, - B43_NPHY_C1_CGAINI_CL2DETECT); - b43_phy_set(dev, B43_NPHY_C2_CGAINI, - B43_NPHY_C2_CGAINI_CL2DETECT); - - /* Set narrowband clip threshold */ - b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); - b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); - - if (!dev->phy.is_40mhz) { - /* Set dwell lengths */ - b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); - b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); - b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); - b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); - } - - /* Set wideband clip 2 threshold */ - b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, - ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, - 21); - b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, - ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, - 21); - - if (!dev->phy.is_40mhz) { - b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, - ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1); - b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, - ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1); - b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI, - ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1); - b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI, - ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); - } - - b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); - - if (nphy->gain_boost) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && - dev->phy.is_40mhz) - code = 4; - else - code = 5; - } else { - code = dev->phy.is_40mhz ? 
6 : 7; - } - - /* Set HPVGA2 index */ - b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, - ~B43_NPHY_C1_INITGAIN_HPVGA2, - code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT); - b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, - ~B43_NPHY_C2_INITGAIN_HPVGA2, - code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); - - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); - /* specs say about 2 loops, but wl does 4 */ - for (i = 0; i < 4; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (code << 8 | 0x7C)); - - b43_nphy_adjust_lna_gain_table(dev); - - if (nphy->elna_gain_config) { - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); - /* specs say about 2 loops, but wl does 4 */ - for (i = 0; i < 4; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (code << 8 | 0x74)); - } - - if (dev->phy.rev == 2) { - for (i = 0; i < 4; i++) { - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, - (0x0400 * i) + 0x0020); - for (j = 0; j < 21; j++) { - tmp = j * (i < 2 ? 3 : 1); - b43_phy_write(dev, - B43_NPHY_TABLE_DATALO, tmp); - } - } - } - - b43_nphy_set_rf_sequence(dev, 5, - rfseq_events, rfseq_delays, 3); - b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, - ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, - 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); - - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) - b43_phy_maskset(dev, B43_PHY_N(0xC5D), - 0xFF80, 4); - } -} - -static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = dev->dev->bus_sprom; - - /* TX to RX */ - u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; - u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 }; - /* RX to TX */ - u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, - 0x1F }; - u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; - u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F }; - u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; - - u16 tmp16; - u32 tmp32; - - tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); - tmp32 &= 0xffffff; - b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); - - b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125); - b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3); - b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105); - b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E); - b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD); - b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020); - - b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C); - b43_phy_write(dev, 0x2AE, 0x000C); - - /* TX to RX */ - b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9); - - /* RX to TX */ - if (b43_nphy_ipa(dev)) - b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa, - rx2tx_delays_ipa, 9); - if (nphy->hw_phyrxchain != 3 && - nphy->hw_phyrxchain != nphy->hw_phytxchain) { - if (b43_nphy_ipa(dev)) { - rx2tx_delays[5] = 59; - rx2tx_delays[6] = 1; - rx2tx_events[7] = 0x1F; - } - b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9); - } - - tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 
- 0x2 : 0x9C40; - b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); - - b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); - - b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); - b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); - - b43_nphy_gain_ctrl_workarounds(dev); - - b43_ntab_write(dev, B43_NTAB32(8, 0), 2); - b43_ntab_write(dev, B43_NTAB32(8, 16), 2); - - /* TODO */ - - b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00); - b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00); - b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06); - b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06); - b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07); - b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); - b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); - b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); - b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); - b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); - - /* N PHY WAR TX Chain Update with hw_phytxchain as argument */ - - if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || - (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) - tmp32 = 0x00088888; - else - tmp32 = 0x88888888; - b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32); - b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32); - b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); - - if (dev->phy.rev == 4 && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, - 0x70); - b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, - 0x70); - } - - b43_phy_write(dev, 0x224, 0x039C); - b43_phy_write(dev, 0x225, 0x0357); - b43_phy_write(dev, 0x226, 0x0317); - b43_phy_write(dev, 0x227, 0x02D7); - b43_phy_write(dev, 0x228, 0x039C); - b43_phy_write(dev, 0x229, 0x0357); - b43_phy_write(dev, 0x22A, 0x0317); - b43_phy_write(dev, 0x22B, 0x02D7); - b43_phy_write(dev, 0x22C, 0x039C); - b43_phy_write(dev, 0x22D, 0x0357); - b43_phy_write(dev, 0x22E, 0x0317); - b43_phy_write(dev, 0x22F, 0x02D7); -} - -static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) -{ - struct ssb_sprom *sprom = dev->dev->bus_sprom; - struct b43_phy *phy = &dev->phy; - struct b43_phy_n *nphy = phy->n; - - u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 }; - u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 }; - - u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; - u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; - - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && - nphy->band5g_pwrgain) { - b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); - b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8); - } else { - b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8); - b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8); - } - - b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A); - b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A); - b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); - b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA); - - if (dev->phy.rev < 2) { - b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000); - b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000); - b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB); - b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB); - b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800); - b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800); - } - - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); - b43_phy_write(dev, 
B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); - - if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD && - dev->dev->board_type == 0x8B) { - delays1[0] = 0x1; - delays1[5] = 0x14; - } - b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); - b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); - - b43_nphy_gain_ctrl_workarounds(dev); - - if (dev->phy.rev < 2) { - if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2) - b43_hf_write(dev, b43_hf_read(dev) | - B43_HF_MLADVW); - } else if (dev->phy.rev == 2) { - b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0); - b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0); - } - - if (dev->phy.rev < 2) - b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL, - ~B43_NPHY_SCRAM_SIGCTL_SCM); - - /* Set phase track alpha and beta */ - b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125); - b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3); - b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105); - b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E); - b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD); - b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); - - b43_phy_mask(dev, B43_NPHY_PIL_DW1, - ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); - b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); - b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); - b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); - - if (dev->phy.rev == 2) - b43_phy_set(dev, B43_NPHY_FINERX2_CGC, - B43_NPHY_FINERX2_CGC_DECGC); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ -static void b43_nphy_workarounds(struct b43_wldev *dev) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_n *nphy = phy->n; - - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) - b43_nphy_classifier(dev, 1, 0); - else - b43_nphy_classifier(dev, 1, 1); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - b43_phy_set(dev, B43_NPHY_IQFLIP, - B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); - - if (dev->phy.rev >= 3) - b43_nphy_workarounds_rev3plus(dev); - else - b43_nphy_workarounds_rev1_2(dev); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} +/************************************************** + * Samples + **************************************************/ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ static int b43_nphy_load_samples(struct b43_wldev *dev, @@ -1825,7 +956,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1); } for (i = 0; i < 100; i++) { - if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) { + if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) { i = 0; break; } @@ -1837,333 +968,9 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } -/* - * Transmits a known value for LO calibration - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone - */ -static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, - bool iqmode, bool dac_test) -{ - u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test); - if (samp == 0) - return -1; - b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test); - return 0; -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ -static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - int i, j; - u32 tmp; - u32 cur_real, cur_imag, real_part, imag_part; - - u16 buffer[7]; - - if (nphy->hang_avoid) - 
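/*
 * The small hunk in b43_nphy_run_samples() above inverts the poll
 * condition: the loop now breaks once bit 0 of B43_NPHY_RFSEQST is
 * clear rather than set, so the 100-iteration wait ends early only when
 * the status bit drops (setting i = 0 then skips the timeout path).
 * Presumably bit 0 reports the triggered RF sequence as still running.
 * The corrected wait loop, reduced to a standalone helper (read_status
 * is a hypothetical callback standing in for b43_phy_read):
 */
#include <stdint.h>

/* Old test:  if (reg & 1)     -> gave up while the bit was still set.
 * New test:  if (!(reg & 1))  -> keep polling until the bit clears.   */
int wait_for_bit_clear(uint16_t (*read_status)(void), int max_tries)
{
        int i;

        for (i = 0; i < max_tries; i++) {
                if (!(read_status() & 1))
                        return 0;       /* done */
                /* the driver waits udelay(10) between polls */
        }
        return -1;                      /* still set after max_tries */
}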
b43_nphy_stay_in_carrier_search(dev, true); - - b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); - - for (i = 0; i < 2; i++) { - tmp = ((buffer[i * 2] & 0x3FF) << 10) | - (buffer[i * 2 + 1] & 0x3FF); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, - (((i + 26) << 10) | 320)); - for (j = 0; j < 128; j++) { - b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, - ((tmp >> 16) & 0xFFFF)); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (tmp & 0xFFFF)); - } - } - - for (i = 0; i < 2; i++) { - tmp = buffer[5 + i]; - real_part = (tmp >> 8) & 0xFF; - imag_part = (tmp & 0xFF); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, - (((i + 26) << 10) | 448)); - - if (dev->phy.rev >= 3) { - cur_real = real_part; - cur_imag = imag_part; - tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF); - } - - for (j = 0; j < 128; j++) { - if (dev->phy.rev < 3) { - cur_real = (real_part * loscale[j] + 128) >> 8; - cur_imag = (imag_part * loscale[j] + 128) >> 8; - tmp = ((cur_real & 0xFF) << 8) | - (cur_imag & 0xFF); - } - b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, - ((tmp >> 16) & 0xFFFF)); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (tmp & 0xFFFF)); - } - } - - if (dev->phy.rev >= 3) { - b43_shm_write16(dev, B43_SHM_SHARED, - B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF); - b43_shm_write16(dev, B43_SHM_SHARED, - B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF); - } - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, false); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ -static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, - u8 *events, u8 *delays, u8 length) -{ - struct b43_phy_n *nphy = dev->phy.n; - u8 i; - u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F; - u16 offset1 = cmd << 4; - u16 offset2 = offset1 + 0x80; - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, true); - - b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); - b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); - - for (i = length; i < 16; i++) { - b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); - b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); - } - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, false); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ -static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, - enum b43_nphy_rf_sequence seq) -{ - static const u16 trigger[] = { - [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, - [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, - [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, - [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, - [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, - [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, - }; - int i; - u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); - - B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); - - b43_phy_set(dev, B43_NPHY_RFSEQMODE, - B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); - b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); - for (i = 0; i < 200; i++) { - if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) - goto ok; - msleep(1); - } - b43err(dev->wl, "RF sequence status timeout\n"); -ok: - b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ -static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, - u16 value, u8 core, bool off) -{ - int i; - u8 index = fls(field); - u8 addr, en_addr, val_addr; - /* we expect only one bit set */ - B43_WARN_ON(field & (~(1 << (index - 1)))); - - if (dev->phy.rev >= 3) { - const struct nphy_rf_control_override_rev3 *rf_ctrl; - for 
(i = 0; i < 2; i++) { - if (index == 0 || index == 16) { - b43err(dev->wl, - "Unsupported RF Ctrl Override call\n"); - return; - } - - rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; - en_addr = B43_PHY_N((i == 0) ? - rf_ctrl->en_addr0 : rf_ctrl->en_addr1); - val_addr = B43_PHY_N((i == 0) ? - rf_ctrl->val_addr0 : rf_ctrl->val_addr1); - - if (off) { - b43_phy_mask(dev, en_addr, ~(field)); - b43_phy_mask(dev, val_addr, - ~(rf_ctrl->val_mask)); - } else { - if (core == 0 || ((1 << core) & i) != 0) { - b43_phy_set(dev, en_addr, field); - b43_phy_maskset(dev, val_addr, - ~(rf_ctrl->val_mask), - (value << rf_ctrl->val_shift)); - } - } - } - } else { - const struct nphy_rf_control_override_rev2 *rf_ctrl; - if (off) { - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); - value = 0; - } else { - b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); - } - - for (i = 0; i < 2; i++) { - if (index <= 1 || index == 16) { - b43err(dev->wl, - "Unsupported RF Ctrl Override call\n"); - return; - } - - if (index == 2 || index == 10 || - (index >= 13 && index <= 15)) { - core = 1; - } - - rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; - addr = B43_PHY_N((i == 0) ? - rf_ctrl->addr0 : rf_ctrl->addr1); - - if ((core & (1 << i)) != 0) - b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), - (value << rf_ctrl->shift)); - - b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_START); - udelay(1); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); - } - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ -static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, - u16 value, u8 core) -{ - u8 i, j; - u16 reg, tmp, val; - - B43_WARN_ON(dev->phy.rev < 3); - B43_WARN_ON(field > 4); - - for (i = 0; i < 2; i++) { - if ((core == 1 && i == 1) || (core == 2 && !i)) - continue; - - reg = (i == 0) ? 
- B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; - b43_phy_mask(dev, reg, 0xFBFF); - - switch (field) { - case 0: - b43_phy_write(dev, reg, 0); - b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); - break; - case 1: - if (!i) { - b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, - 0xFC3F, (value << 6)); - b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, - 0xFFFE, 1); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_START); - for (j = 0; j < 100; j++) { - if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) { - j = 0; - break; - } - udelay(10); - } - if (j) - b43err(dev->wl, - "intc override timeout\n"); - b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, - 0xFFFE); - } else { - b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, - 0xFC3F, (value << 6)); - b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, - 0xFFFE, 1); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_RXTX); - for (j = 0; j < 100; j++) { - if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) { - j = 0; - break; - } - udelay(10); - } - if (j) - b43err(dev->wl, - "intc override timeout\n"); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, - 0xFFFE); - } - break; - case 2: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - tmp = 0x0020; - val = value << 5; - } else { - tmp = 0x0010; - val = value << 4; - } - b43_phy_maskset(dev, reg, ~tmp, val); - break; - case 3: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - tmp = 0x0001; - val = value; - } else { - tmp = 0x0004; - val = value << 2; - } - b43_phy_maskset(dev, reg, ~tmp, val); - break; - case 4: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - tmp = 0x0002; - val = value << 1; - } else { - tmp = 0x0008; - val = value << 3; - } - b43_phy_maskset(dev, reg, ~tmp, val); - break; - } - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ -static void b43_nphy_bphy_init(struct b43_wldev *dev) -{ - unsigned int i; - u16 val; - - val = 0x1E1F; - for (i = 0; i < 16; i++) { - b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); - val -= 0x202; - } - val = 0x3E3F; - for (i = 0; i < 16; i++) { - b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val); - val -= 0x202; - } - b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); -} +/************************************************** + * RSSI + **************************************************/ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, @@ -2233,67 +1040,6 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); } -static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) -{ - u16 val; - - if (type < 3) - val = 0; - else if (type == 6) - val = 1; - else if (type == 3) - val = 2; - else - val = 3; - - val = (val << 12) | (val << 14); - b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); - b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); - - if (type < 3) { - b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, - (type + 1) << 4); - b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, - (type + 1) << 4); - } - - if (code == 0) { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); - if (type < 3) { - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - ~(B43_NPHY_RFCTL_CMD_RXEN | - B43_NPHY_RFCTL_CMD_CORESEL)); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, - ~(0x1 << 12 | - 0x1 << 5 | - 0x1 << 1 | - 0x1)); - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - ~B43_NPHY_RFCTL_CMD_START); - udelay(20); - b43_phy_mask(dev, 
B43_NPHY_RFCTL_OVER, ~0x1); - } - } else { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); - if (type < 3) { - b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, - ~(B43_NPHY_RFCTL_CMD_RXEN | - B43_NPHY_RFCTL_CMD_CORESEL), - (B43_NPHY_RFCTL_CMD_RXEN | - code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT)); - b43_phy_set(dev, B43_NPHY_RFCTL_OVER, - (0x1 << 12 | - 0x1 << 5 | - 0x1 << 1 | - 0x1)); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_START); - udelay(20); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); - } - } -} - static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { u8 i; @@ -2377,6 +1123,67 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) } } +static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) +{ + u16 val; + + if (type < 3) + val = 0; + else if (type == 6) + val = 1; + else if (type == 3) + val = 2; + else + val = 3; + + val = (val << 12) | (val << 14); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); + + if (type < 3) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, + (type + 1) << 4); + b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, + (type + 1) << 4); + } + + if (code == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); + if (type < 3) { + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + ~(B43_NPHY_RFCTL_CMD_RXEN | + B43_NPHY_RFCTL_CMD_CORESEL)); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, + ~(0x1 << 12 | + 0x1 << 5 | + 0x1 << 1 | + 0x1)); + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + ~B43_NPHY_RFCTL_CMD_START); + udelay(20); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); + } + } else { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); + if (type < 3) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, + ~(B43_NPHY_RFCTL_CMD_RXEN | + B43_NPHY_RFCTL_CMD_CORESEL), + (B43_NPHY_RFCTL_CMD_RXEN | + code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT)); + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, + (0x1 << 12 | + 0x1 << 5 | + 0x1 << 1 | + 0x1)); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + udelay(20); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); + } + } +} + /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { @@ -2686,6 +1493,1413 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev) } } +/************************************************** + * Workarounds + **************************************************/ + +static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = dev->dev->bus_sprom; + + bool ghz5; + bool ext_lna; + u16 rssi_gain; + struct nphy_gain_ctl_workaround_entry *e; + u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; + u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; + + /* Prepare values */ + ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) + & B43_NPHY_BANDCTL_5GHZ; + ext_lna = ghz5 ? 
sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ : + sprom->boardflags_lo & B43_BFL_EXTLNA; + e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); + if (ghz5 && dev->phy.rev >= 5) + rssi_gain = 0x90; + else + rssi_gain = 0x50; + + b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); + + /* Set Clip 2 detect */ + b43_phy_set(dev, B43_NPHY_C1_CGAINI, + B43_NPHY_C1_CGAINI_CL2DETECT); + b43_phy_set(dev, B43_NPHY_C2_CGAINI, + B43_NPHY_C2_CGAINI_CL2DETECT); + + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, + rssi_gain); + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, + rssi_gain); + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); + + b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); + b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); + + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); + b43_phy_write(dev, 0x2A7, e->init_gain); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, + e->rfseq_init); + + /* TODO: check defines. 
Do not match variables names */ + b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); + b43_phy_write(dev, 0x2A9, e->cliphi_gain); + b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); + b43_phy_write(dev, 0x2AB, e->clipmd_gain); + b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); + b43_phy_write(dev, 0x2AD, e->cliplo_gain); + + b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); + b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); + b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); + b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); + b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); + b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); +} + +static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + u8 i, j; + u8 code; + u16 tmp; + u8 rfseq_events[3] = { 6, 8, 7 }; + u8 rfseq_delays[3] = { 10, 30, 1 }; + + /* Set Clip 2 detect */ + b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT); + b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT); + + /* Set narrowband clip threshold */ + b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); + b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); + + if (!dev->phy.is_40mhz) { + /* Set dwell lengths */ + b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); + b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); + b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); + b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); + } + + /* Set wideband clip 2 threshold */ + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21); + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21); + + if (!dev->phy.is_40mhz) { + b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, + ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, + ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI, + ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI, + ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); + } + + b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); + + if (nphy->gain_boost) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && + dev->phy.is_40mhz) + code = 4; + else + code = 5; + } else { + code = dev->phy.is_40mhz ? 
6 : 7; + } + + /* Set HPVGA2 index */ + b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2, + code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT); + b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2, + code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); + /* specs say about 2 loops, but wl does 4 */ + for (i = 0; i < 4; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C)); + + b43_nphy_adjust_lna_gain_table(dev); + + if (nphy->elna_gain_config) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); + /* specs say about 2 loops, but wl does 4 */ + for (i = 0; i < 4; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (code << 8 | 0x74)); + } + + if (dev->phy.rev == 2) { + for (i = 0; i < 4; i++) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (0x0400 * i) + 0x0020); + for (j = 0; j < 21; j++) { + tmp = j * (i < 2 ? 3 : 1); + b43_phy_write(dev, + B43_NPHY_TABLE_DATALO, tmp); + } + } + } + + b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3); + b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, + ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, + 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ +static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) +{ + if (dev->phy.rev >= 3) + b43_nphy_gain_ctl_workarounds_rev3plus(dev); + else + b43_nphy_gain_ctl_workarounds_rev1_2(dev); +} + +static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + struct ssb_sprom *sprom = dev->dev->bus_sprom; + + /* TX to RX */ + u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; + u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 }; + /* RX to TX */ + u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, + 0x1F }; + u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; + u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F }; + u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; + + u16 tmp16; + u32 tmp32; + + b43_phy_write(dev, 0x23f, 0x1f8); + b43_phy_write(dev, 0x240, 0x1f8); + + tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); + tmp32 &= 0xffffff; + b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); + + b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125); + b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3); + b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105); + b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E); + b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD); + b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020); + + b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C); + b43_phy_write(dev, 0x2AE, 0x000C); + + /* TX to RX */ + b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, + ARRAY_SIZE(tx2rx_events)); + + /* RX to TX */ + if (b43_nphy_ipa(dev)) + b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa, + rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa)); + if 
(nphy->hw_phyrxchain != 3 && + nphy->hw_phyrxchain != nphy->hw_phytxchain) { + if (b43_nphy_ipa(dev)) { + rx2tx_delays[5] = 59; + rx2tx_delays[6] = 1; + rx2tx_events[7] = 0x1F; + } + b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, + ARRAY_SIZE(rx2tx_events)); + } + + tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? + 0x2 : 0x9C40; + b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); + + b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); + + b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); + b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); + + b43_nphy_gain_ctl_workarounds(dev); + + b43_ntab_write(dev, B43_NTAB16(8, 0), 2); + b43_ntab_write(dev, B43_NTAB16(8, 16), 2); + + /* TODO */ + + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); + + /* N PHY WAR TX Chain Update with hw_phytxchain as argument */ + + if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR && + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || + (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && + b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) + tmp32 = 0x00088888; + else + tmp32 = 0x88888888; + b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32); + b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32); + b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); + + if (dev->phy.rev == 4 && + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, + 0x70); + b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, + 0x70); + } + + b43_phy_write(dev, 0x224, 0x03eb); + b43_phy_write(dev, 0x225, 0x03eb); + b43_phy_write(dev, 0x226, 0x0341); + b43_phy_write(dev, 0x227, 0x0341); + b43_phy_write(dev, 0x228, 0x042b); + b43_phy_write(dev, 0x229, 0x042b); + b43_phy_write(dev, 0x22a, 0x0381); + b43_phy_write(dev, 0x22b, 0x0381); + b43_phy_write(dev, 0x22c, 0x042b); + b43_phy_write(dev, 0x22d, 0x042b); + b43_phy_write(dev, 0x22e, 0x0381); + b43_phy_write(dev, 0x22f, 0x0381); +} + +static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = dev->dev->bus_sprom; + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 }; + u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 }; + + u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; + u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && + nphy->band5g_pwrgain) { + b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); + b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8); + } else { + b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8); + b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8); + } + + b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A); + b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A); + b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); + b43_ntab_write(dev, 
B43_NTAB16(8, 0x12), 0xCDAA); + + if (dev->phy.rev < 2) { + b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000); + b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000); + b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB); + b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB); + b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800); + b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800); + } + + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); + + if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD && + dev->dev->board_type == 0x8B) { + delays1[0] = 0x1; + delays1[5] = 0x14; + } + b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); + b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); + + b43_nphy_gain_ctl_workarounds(dev); + + if (dev->phy.rev < 2) { + if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2) + b43_hf_write(dev, b43_hf_read(dev) | + B43_HF_MLADVW); + } else if (dev->phy.rev == 2) { + b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0); + b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0); + } + + if (dev->phy.rev < 2) + b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL, + ~B43_NPHY_SCRAM_SIGCTL_SCM); + + /* Set phase track alpha and beta */ + b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125); + b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3); + b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105); + b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E); + b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD); + b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); + + b43_phy_mask(dev, B43_NPHY_PIL_DW1, + ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); + + if (dev->phy.rev == 2) + b43_phy_set(dev, B43_NPHY_FINERX2_CGC, + B43_NPHY_FINERX2_CGC_DECGC); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ +static void b43_nphy_workarounds(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + b43_nphy_classifier(dev, 1, 0); + else + b43_nphy_classifier(dev, 1, 1); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + b43_phy_set(dev, B43_NPHY_IQFLIP, + B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); + + if (dev->phy.rev >= 3) + b43_nphy_workarounds_rev3plus(dev); + else + b43_nphy_workarounds_rev1_2(dev); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/************************************************** + * Tx/Rx common + **************************************************/ + +/* + * Transmits a known value for LO calibration + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone + */ +static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, + bool iqmode, bool dac_test) +{ + u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test); + if (samp == 0) + return -1; + b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test); + return 0; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ +static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + bool override = false; + u16 chain = 0x33; + + if (nphy->txrx_chain == 0) { + chain = 0x11; + override = true; + } else if (nphy->txrx_chain == 1) { + chain = 0x22; + override = true; + } + + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + ~(B43_NPHY_RFSEQCA_TXEN | 
B43_NPHY_RFSEQCA_RXEN), + chain); + + if (override) + b43_phy_set(dev, B43_NPHY_RFSEQMODE, + B43_NPHY_RFSEQMODE_CAOVER); + else + b43_phy_mask(dev, B43_NPHY_RFSEQMODE, + ~B43_NPHY_RFSEQMODE_CAOVER); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ +static void b43_nphy_stop_playback(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + u16 tmp; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT); + if (tmp & 0x1) + b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); + else if (tmp & 0x2) + b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); + + b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); + + if (nphy->bb_mult_save & 0x80000000) { + tmp = nphy->bb_mult_save & 0xFFFF; + b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); + nphy->bb_mult_save = 0; + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ +static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, + struct nphy_txgains target, + struct nphy_iqcal_params *params) +{ + int i, j, indx; + u16 gain; + + if (dev->phy.rev >= 3) { + params->txgm = target.txgm[core]; + params->pga = target.pga[core]; + params->pad = target.pad[core]; + params->ipa = target.ipa[core]; + params->cal_gain = (params->txgm << 12) | (params->pga << 8) | + (params->pad << 4) | (params->ipa); + for (j = 0; j < 5; j++) + params->ncorr[j] = 0x79; + } else { + gain = (target.pad[core]) | (target.pga[core] << 4) | + (target.txgm[core] << 8); + + indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? + 1 : 0; + for (i = 0; i < 9; i++) + if (tbl_iqcal_gainparams[indx][i][0] == gain) + break; + i = min(i, 8); + + params->txgm = tbl_iqcal_gainparams[indx][i][1]; + params->pga = tbl_iqcal_gainparams[indx][i][2]; + params->pad = tbl_iqcal_gainparams[indx][i][3]; + params->cal_gain = (params->txgm << 7) | (params->pga << 4) | + (params->pad << 2); + for (j = 0; j < 4; j++) + params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j]; + } +} + +/************************************************** + * Tx and Rx + **************************************************/ + +void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) +{//TODO +} + +static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) +{//TODO +} + +static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{//TODO + return B43_TXPWR_RES_DONE; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ +static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u16 bmask, val, tmp; + enum ieee80211_band band = b43_current_band(dev->wl); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + nphy->txpwrctrl = enable; + if (!enable) { + if (dev->phy.rev >= 3 && + (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) & + (B43_NPHY_TXPCTL_CMD_COEFF | + B43_NPHY_TXPCTL_CMD_HWPCTLEN | + B43_NPHY_TXPCTL_CMD_PCTLEN))) { + /* We disable enabled TX pwr ctl, save it's state */ + nphy->tx_pwr_idx[0] = b43_phy_read(dev, + B43_NPHY_C1_TXPCTL_STAT) & 0x7f; + nphy->tx_pwr_idx[1] = b43_phy_read(dev, + B43_NPHY_C2_TXPCTL_STAT) & 0x7f; + } + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840); + for (i = 0; i < 84; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40); + for (i = 0; i < 84; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); + + tmp 
= B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; + if (dev->phy.rev >= 3) + tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN; + b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp); + + if (dev->phy.rev >= 3) { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); + } else { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); + } + + if (dev->phy.rev == 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, + ~B43_NPHY_BPHY_CTL3_SCALE, 0x53); + else if (dev->phy.rev < 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, + ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A); + + if (dev->phy.rev < 2 && dev->phy.is_40mhz) + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW); + } else { + b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, + nphy->adj_pwr_tbl); + b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, + nphy->adj_pwr_tbl); + + bmask = B43_NPHY_TXPCTL_CMD_COEFF | + B43_NPHY_TXPCTL_CMD_HWPCTLEN; + /* wl does useless check for "enable" param here */ + val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; + if (dev->phy.rev >= 3) { + bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN; + if (val) + val |= B43_NPHY_TXPCTL_CMD_PCTLEN; + } + b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val); + + if (band == IEEE80211_BAND_5GHZ) { + b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, + ~B43_NPHY_TXPCTL_CMD_INIT, 0x64); + if (dev->phy.rev > 1) + b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT, + ~B43_NPHY_TXPCTL_INIT_PIDXI1, + 0x64); + } + + if (dev->phy.rev >= 3) { + if (nphy->tx_pwr_idx[0] != 128 && + nphy->tx_pwr_idx[1] != 128) { + /* Recover TX pwr ctl state */ + b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, + ~B43_NPHY_TXPCTL_CMD_INIT, + nphy->tx_pwr_idx[0]); + if (dev->phy.rev > 1) + b43_phy_maskset(dev, + B43_NPHY_TXPCTL_INIT, + ~0xff, nphy->tx_pwr_idx[1]); + } + } + + if (dev->phy.rev >= 3) { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100); + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100); + } else { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000); + } + + if (dev->phy.rev == 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b); + else if (dev->phy.rev < 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40); + + if (dev->phy.rev < 2 && dev->phy.is_40mhz) + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW); + + if (b43_nphy_ipa(dev)) { + b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4); + b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4); + } + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ +static void b43_nphy_tx_power_fix(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + struct ssb_sprom *sprom = dev->dev->bus_sprom; + + u8 txpi[2], bbmult, i; + u16 tmp, radio_gain, dac_gain; + u16 freq = dev->phy.channel_freq; + u32 txgain; + /* u32 gaintbl; rev3+ */ + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + if (dev->phy.rev >= 7) { + txpi[0] = txpi[1] = 30; + } else if (dev->phy.rev >= 3) { + txpi[0] = 40; + txpi[1] = 40; + } else if (sprom->revision < 4) { + txpi[0] = 72; + txpi[1] = 72; + } else { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + txpi[0] = sprom->txpid2g[0]; + txpi[1] = sprom->txpid2g[1]; + } else if (freq >= 4900 && freq < 5100) { + txpi[0] = sprom->txpid5gl[0]; + txpi[1] = sprom->txpid5gl[1]; + } else if (freq >= 5100 && freq < 5500) { + txpi[0] = sprom->txpid5g[0]; + txpi[1] = sprom->txpid5g[1]; + } else if (freq >= 5500) { + txpi[0] = sprom->txpid5gh[0]; + txpi[1] = sprom->txpid5gh[1]; + } else { + txpi[0] = 91; + txpi[1] = 91; + } + } + 
if (dev->phy.rev < 7 && + (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100)) + txpi[0] = txpi[1] = 91; + + /* + for (i = 0; i < 2; i++) { + nphy->txpwrindex[i].index_internal = txpi[i]; + nphy->txpwrindex[i].index_internal_save = txpi[i]; + } + */ + + for (i = 0; i < 2; i++) { + if (dev->phy.rev >= 3) { + if (b43_nphy_ipa(dev)) { + txgain = *(b43_nphy_get_ipa_gain_table(dev) + + txpi[i]); + } else if (b43_current_band(dev->wl) == + IEEE80211_BAND_5GHZ) { + /* FIXME: use 5GHz tables */ + txgain = + b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; + } else { + if (dev->phy.rev >= 5 && + sprom->fem.ghz5.extpa_gain == 3) + ; /* FIXME: 5GHz_txgain_HiPwrEPA */ + txgain = + b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; + } + radio_gain = (txgain >> 16) & 0x1FFFF; + } else { + txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]]; + radio_gain = (txgain >> 16) & 0x1FFF; + } + + if (dev->phy.rev >= 7) + dac_gain = (txgain >> 8) & 0x7; + else + dac_gain = (txgain >> 8) & 0x3F; + bbmult = txgain & 0xFF; + + if (dev->phy.rev >= 3) { + if (i == 0) + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); + else + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); + } else { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); + } + + if (i == 0) + b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain); + else + b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain); + + b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain); + + tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57)); + if (i == 0) + tmp = (tmp & 0x00FF) | (bbmult << 8); + else + tmp = (tmp & 0xFF00) | bbmult; + b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp); + + if (b43_nphy_ipa(dev)) { + u32 tmp32; + u16 reg = (i == 0) ? + B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1; + tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, + 576 + txpi[i])); + b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4); + b43_phy_set(dev, reg, 0x4); + } + } + + b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + u8 core; + u16 r; /* routing */ + + if (phy->rev >= 7) { + for (core = 0; core < 2; core++) { + r = core ? 0x190 : 0x170; + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_radio_write(dev, r + 0x5, 0x5); + b43_radio_write(dev, r + 0x9, 0xE); + if (phy->rev != 5) + b43_radio_write(dev, r + 0xA, 0); + if (phy->rev != 7) + b43_radio_write(dev, r + 0xB, 1); + else + b43_radio_write(dev, r + 0xB, 0x31); + } else { + b43_radio_write(dev, r + 0x5, 0x9); + b43_radio_write(dev, r + 0x9, 0xC); + b43_radio_write(dev, r + 0xB, 0x0); + if (phy->rev != 5) + b43_radio_write(dev, r + 0xA, 1); + else + b43_radio_write(dev, r + 0xA, 0x31); + } + b43_radio_write(dev, r + 0x6, 0); + b43_radio_write(dev, r + 0x7, 0); + b43_radio_write(dev, r + 0x8, 3); + b43_radio_write(dev, r + 0xC, 0); + } + } else { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128); + else + b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80); + b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0); + b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29); + + for (core = 0; core < 2; core++) { + r = core ? 
B2056_TX1 : B2056_TX0; + + b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0); + b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0); + b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3); + b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0); + b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8); + b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0); + b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0); + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER, + 0x5); + if (phy->rev != 5) + b43_radio_write(dev, r | B2056_TX_TSSIA, + 0x00); + if (phy->rev >= 5) + b43_radio_write(dev, r | B2056_TX_TSSIG, + 0x31); + else + b43_radio_write(dev, r | B2056_TX_TSSIG, + 0x11); + b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX, + 0xE); + } else { + b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER, + 0x9); + b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31); + b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0); + b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX, + 0xC); + } + } + } +} + +/* + * Stop radio and transmit known signal. Then check received signal strength to + * get TSSI (Transmit Signal Strength Indicator). + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi + */ +static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = dev->phy.n; + + u32 tmp; + s32 rssi[4] = { }; + + /* TODO: check if we can transmit */ + + if (b43_nphy_ipa(dev)) + b43_nphy_ipa_internal_tssi_setup(dev); + + if (phy->rev >= 7) + ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */ + else if (phy->rev >= 3) + b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false); + + b43_nphy_stop_playback(dev); + b43_nphy_tx_tone(dev, 0xFA0, 0, false, false); + udelay(20); + tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1); + b43_nphy_stop_playback(dev); + b43_nphy_rssi_select(dev, 0, 0); + + if (phy->rev >= 7) + ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */ + else if (phy->rev >= 3) + b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true); + + if (phy->rev >= 3) { + nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF; + nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF; + } else { + nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF; + nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF; + } + nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF; + nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF; +} + +static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + const u32 *table = NULL; +#if 0 + TODO: b43_ntab_papd_pga_gain_delta_ipa_2* + u32 rfpwr_offset; + u8 pga_gain; + int i; +#endif + + if (phy->rev >= 3) { + if (b43_nphy_ipa(dev)) { + table = b43_nphy_get_ipa_gain_table(dev); + } else { + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (phy->rev == 3) + table = b43_ntab_tx_gain_rev3_5ghz; + if (phy->rev == 4) + table = b43_ntab_tx_gain_rev4_5ghz; + else + table = b43_ntab_tx_gain_rev5plus_5ghz; + } else { + table = b43_ntab_tx_gain_rev3plus_2ghz; + } + } + } else { + table = b43_ntab_tx_gain_rev0_1_2; + } + b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table); + b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table); + + if (phy->rev >= 3) { +#if 0 + nphy->gmval = (table[0] >> 16) & 0x7000; + + for (i = 0; i < 128; i++) { + pga_gain = (table[i] >> 24) & 0xF; + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain]; + else + rfpwr_offset = 
b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain]; + b43_ntab_write(dev, B43_NTAB32(26, 576 + i), + rfpwr_offset); + b43_ntab_write(dev, B43_NTAB32(27, 576 + i), + rfpwr_offset); + } +#endif + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ +static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) +{ + struct b43_phy_n *nphy = dev->phy.n; + enum ieee80211_band band; + u16 tmp; + + if (!enable) { + nphy->rfctrl_intc1_save = b43_phy_read(dev, + B43_NPHY_RFCTL_INTC1); + nphy->rfctrl_intc2_save = b43_phy_read(dev, + B43_NPHY_RFCTL_INTC2); + band = b43_current_band(dev->wl); + if (dev->phy.rev >= 3) { + if (band == IEEE80211_BAND_5GHZ) + tmp = 0x600; + else + tmp = 0x480; + } else { + if (band == IEEE80211_BAND_5GHZ) + tmp = 0x180; + else + tmp = 0x120; + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp); + } else { + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, + nphy->rfctrl_intc1_save); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, + nphy->rfctrl_intc2_save); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */ +static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev) +{ + u16 tmp; + + if (dev->phy.rev >= 3) { + if (b43_nphy_ipa(dev)) { + tmp = 4; + b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2, + (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); + } + + tmp = 1; + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2, + (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ +static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, + u16 samps, u8 time, bool wait) +{ + int i; + u16 tmp; + + b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps); + b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time); + if (wait) + b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE); + else + b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE); + + b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START); + + for (i = 1000; i; i--) { + tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD); + if (!(tmp & B43_NPHY_IQEST_CMD_START)) { + est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0); + est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0); + est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0); + + est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1); + est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1); + est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1); + return; + } + udelay(10); + } + memset(est, 0, sizeof(*est)); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ +static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, + struct b43_phy_n_iq_comp *pcomp) +{ + if (write) { + b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0); + b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0); + b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1); + b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1); + } else { + pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0); + pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0); + pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1); + pcomp->b1 = b43_phy_read(dev, 
B43_NPHY_C2_RXIQ_COMPB1); + } +} + +#if 0 +/* Ready but not used anywhere */ +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ +static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) +{ + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; + + b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]); + if (core == 0) { + b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]); + } else { + b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]); + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]); + b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]); + b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); + b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ +static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) +{ + u8 rxval, txval; + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; + + regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA); + if (core == 0) { + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); + } else { + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + } + regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); + regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); + regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1); + regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); + regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); + regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); + + b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); + b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); + + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, + ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, + ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, + (core << B43_NPHY_RFSEQCA_RXEN_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS, + (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT)); + + if (core == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007); + } else { + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); + } + + b43_nphy_rf_control_intc_override(dev, 2, 0, 3); + b43_nphy_rf_control_override(dev, 8, 0, 3, false); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); + + if (core == 0) { + rxval = 1; + txval = 8; + } else { + rxval = 4; + txval = 2; + } + b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); + b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); +} +#endif + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ +static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) +{ + int i; + s32 iq; + u32 ii; + u32 qq; + int iq_nbits, qq_nbits; + int arsh, brsh; + u16 tmp, a, b; + + struct nphy_iq_est est; + struct b43_phy_n_iq_comp old; + struct b43_phy_n_iq_comp new = { }; + bool error = false; + + if (mask == 0) + return; + + b43_nphy_rx_iq_coeffs(dev, false, &old); + 
b43_nphy_rx_iq_coeffs(dev, true, &new); + b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false); + new = old; + + for (i = 0; i < 2; i++) { + if (i == 0 && (mask & 1)) { + iq = est.iq0_prod; + ii = est.i0_pwr; + qq = est.q0_pwr; + } else if (i == 1 && (mask & 2)) { + iq = est.iq1_prod; + ii = est.i1_pwr; + qq = est.q1_pwr; + } else { + continue; + } + + if (ii + qq < 2) { + error = true; + break; + } + + iq_nbits = fls(abs(iq)); + qq_nbits = fls(qq); + + arsh = iq_nbits - 20; + if (arsh >= 0) { + a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh))); + tmp = ii >> arsh; + } else { + a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh))); + tmp = ii << -arsh; + } + if (tmp == 0) { + error = true; + break; + } + a /= tmp; + + brsh = qq_nbits - 11; + if (brsh >= 0) { + b = (qq << (31 - qq_nbits)); + tmp = ii >> brsh; + } else { + b = (qq << (31 - qq_nbits)); + tmp = ii << -brsh; + } + if (tmp == 0) { + error = true; + break; + } + b = int_sqrt(b / tmp - a * a) - (1 << 10); + + if (i == 0 && (mask & 0x1)) { + if (dev->phy.rev >= 3) { + new.a0 = a & 0x3FF; + new.b0 = b & 0x3FF; + } else { + new.a0 = b & 0x3FF; + new.b0 = a & 0x3FF; + } + } else if (i == 1 && (mask & 0x2)) { + if (dev->phy.rev >= 3) { + new.a1 = a & 0x3FF; + new.b1 = b & 0x3FF; + } else { + new.a1 = b & 0x3FF; + new.b1 = a & 0x3FF; + } + } + } + + if (error) + new = old; + + b43_nphy_rx_iq_coeffs(dev, true, &new); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ +static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev) +{ + u16 array[4]; + b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ +static void b43_nphy_spur_workaround(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + u8 channel = dev->phy.channel; + int tone[2] = { 57, 58 }; + u32 noise[2] = { 0x3FF, 0x3FF }; + + B43_WARN_ON(dev->phy.rev < 3); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + if (nphy->gband_spurwar_en) { + /* TODO: N PHY Adjust Analog Pfbw (7) */ + if (channel == 11 && dev->phy.is_40mhz) + ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/ + else + ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ + /* TODO: N PHY Adjust CRS Min Power (0x1E) */ + } + + if (nphy->aband_spurwar_en) { + if (channel == 54) { + tone[0] = 0x20; + noise[0] = 0x25F; + } else if (channel == 38 || channel == 102 || channel == 118) { + if (0 /* FIXME */) { + tone[0] = 0x20; + noise[0] = 0x21F; + } else { + tone[0] = 0; + noise[0] = 0; + } + } else if (channel == 134) { + tone[0] = 0x20; + noise[0] = 0x21F; + } else if (channel == 151) { + tone[0] = 0x10; + noise[0] = 0x23F; + } else if (channel == 153 || channel == 161) { + tone[0] = 0x30; + noise[0] = 0x23F; + } else { + tone[0] = 0; + noise[0] = 0; + } + + if (!tone[0] && !noise[0]) + ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/ + else + ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ +static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + int i, j; + u32 tmp; + u32 cur_real, cur_imag, 
real_part, imag_part; + + u16 buffer[7]; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); + + b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); + + for (i = 0; i < 2; i++) { + tmp = ((buffer[i * 2] & 0x3FF) << 10) | + (buffer[i * 2 + 1] & 0x3FF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (((i + 26) << 10) | 320)); + for (j = 0; j < 128; j++) { + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, + ((tmp >> 16) & 0xFFFF)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (tmp & 0xFFFF)); + } + } + + for (i = 0; i < 2; i++) { + tmp = buffer[5 + i]; + real_part = (tmp >> 8) & 0xFF; + imag_part = (tmp & 0xFF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (((i + 26) << 10) | 448)); + + if (dev->phy.rev >= 3) { + cur_real = real_part; + cur_imag = imag_part; + tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF); + } + + for (j = 0; j < 128; j++) { + if (dev->phy.rev < 3) { + cur_real = (real_part * loscale[j] + 128) >> 8; + cur_imag = (imag_part * loscale[j] + 128) >> 8; + tmp = ((cur_real & 0xFF) << 8) | + (cur_imag & 0xFF); + } + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, + ((tmp >> 16) & 0xFFFF)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (tmp & 0xFFFF)); + } + } + + if (dev->phy.rev >= 3) { + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF); + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); +} + /* * Restore RSSI Calibration * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal @@ -2729,24 +2943,6 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ -static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) -{ - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - if (dev->phy.rev >= 6) { - if (dev->dev->chip_id == 47162) - return txpwrctrl_tx_gain_ipa_rev5; - return txpwrctrl_tx_gain_ipa_rev6; - } else if (dev->phy.rev >= 5) { - return txpwrctrl_tx_gain_ipa_rev5; - } else { - return txpwrctrl_tx_gain_ipa; - } - } else { - return txpwrctrl_tx_gain_ipa_5g; - } -} - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) { @@ -2841,44 +3037,6 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ -static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, - struct nphy_txgains target, - struct nphy_iqcal_params *params) -{ - int i, j, indx; - u16 gain; - - if (dev->phy.rev >= 3) { - params->txgm = target.txgm[core]; - params->pga = target.pga[core]; - params->pad = target.pad[core]; - params->ipa = target.ipa[core]; - params->cal_gain = (params->txgm << 12) | (params->pga << 8) | - (params->pad << 4) | (params->ipa); - for (j = 0; j < 5; j++) - params->ncorr[j] = 0x79; - } else { - gain = (target.pad[core]) | (target.pga[core] << 4) | - (target.txgm[core] << 8); - - indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 
- 1 : 0; - for (i = 0; i < 9; i++) - if (tbl_iqcal_gainparams[indx][i][0] == gain) - break; - i = min(i, 8); - - params->txgm = tbl_iqcal_gainparams[indx][i][1]; - params->pga = tbl_iqcal_gainparams[indx][i][2]; - params->pad = tbl_iqcal_gainparams[indx][i][3]; - params->cal_gain = (params->txgm << 7) | (params->pga << 4) | - (params->pad << 2); - for (j = 0; j < 4; j++) - params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j]; - } -} - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) { @@ -3258,7 +3416,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, if (dev->phy.rev >= 4) { avoid = nphy->hang_avoid; - nphy->hang_avoid = 0; + nphy->hang_avoid = false; } b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save); @@ -3368,7 +3526,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, if (phy6or5x && updated[core] == 0) { b43_nphy_update_tx_cal_ladder(dev, core); - updated[core] = 1; + updated[core] = true; } tmp = (params[core].ncorr[type] << 8) | 0x66; @@ -3730,10 +3888,104 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) b43_mac_enable(dev); } +/************************************************** + * N-PHY init + **************************************************/ + /* - * Init N-PHY - * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N + * Upload the N-PHY tables. + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */ +static void b43_nphy_tables_init(struct b43_wldev *dev) +{ + if (dev->phy.rev < 3) + b43_nphy_rev0_1_2_tables_init(dev); + else + b43_nphy_rev3plus_tables_init(dev); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ +static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) +{ + u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); + + mimocfg |= B43_NPHY_MIMOCFG_AUTO; + if (preamble == 1) + mimocfg |= B43_NPHY_MIMOCFG_GFMIX; + else + mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX; + + b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ +static void b43_nphy_bphy_init(struct b43_wldev *dev) +{ + unsigned int i; + u16 val; + + val = 0x1E1F; + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); + val -= 0x202; + } + val = 0x3E3F; + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val); + val -= 0x202; + } + b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ +static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) +{ + if (dev->phy.rev >= 3) { + if (!init) + return; + if (0 /* FIXME */) { + b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211); + b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222); + b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144); + b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188); + } + } else { + b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); + b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); + + switch (dev->dev->bus_type) { +#ifdef CONFIG_B43_BCMA + case B43_BUS_BCMA: + bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, + 0xFC00, 0xFC00); + break; +#endif +#ifdef CONFIG_B43_SSB + case B43_BUS_SSB: + ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco, + 0xFC00, 0xFC00); + break; +#endif + } + + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) & + ~B43_MACCTL_GPOUTSMSK); + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, + 
b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00); + + if (init) { + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */ int b43_phy_initn(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; @@ -3857,7 +4109,7 @@ int b43_phy_initn(struct b43_wldev *dev) tx_pwr_state = nphy->txpwrctrl; b43_nphy_tx_power_ctrl(dev, false); b43_nphy_tx_power_fix(dev); - /* TODO N PHY TX Power Control Idle TSSI */ + b43_nphy_tx_power_ctl_idle_tssi(dev); /* TODO N PHY TX Power Control Setup */ b43_nphy_tx_gain_table_upload(dev); @@ -3928,6 +4180,91 @@ int b43_phy_initn(struct b43_wldev *dev) return 0; } +/************************************************** + * Channel switching ops. + **************************************************/ + +static void b43_chantab_phy_upload(struct b43_wldev *dev, + const struct b43_phy_n_sfo_cfg *e) +{ + b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); + b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); + b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3); + b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4); + b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5); + b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ +static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) +{ + struct bcma_drv_cc __maybe_unused *cc; + u32 __maybe_unused pmu_ctl; + + switch (dev->dev->bus_type) { +#ifdef CONFIG_B43_BCMA + case B43_BUS_BCMA: + cc = &dev->dev->bdev->bus->drv_cc; + if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { + if (avoid) { + bcma_chipco_pll_write(cc, 0x0, 0x11500010); + bcma_chipco_pll_write(cc, 0x1, 0x000C0C06); + bcma_chipco_pll_write(cc, 0x2, 0x0F600a08); + bcma_chipco_pll_write(cc, 0x3, 0x00000000); + bcma_chipco_pll_write(cc, 0x4, 0x2001E920); + bcma_chipco_pll_write(cc, 0x5, 0x88888815); + } else { + bcma_chipco_pll_write(cc, 0x0, 0x11100010); + bcma_chipco_pll_write(cc, 0x1, 0x000c0c06); + bcma_chipco_pll_write(cc, 0x2, 0x03000a08); + bcma_chipco_pll_write(cc, 0x3, 0x00000000); + bcma_chipco_pll_write(cc, 0x4, 0x200005c0); + bcma_chipco_pll_write(cc, 0x5, 0x88888815); + } + pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; + } else if (dev->dev->chip_id == 0x4716) { + if (avoid) { + bcma_chipco_pll_write(cc, 0x0, 0x11500060); + bcma_chipco_pll_write(cc, 0x1, 0x080C0C06); + bcma_chipco_pll_write(cc, 0x2, 0x0F600000); + bcma_chipco_pll_write(cc, 0x3, 0x00000000); + bcma_chipco_pll_write(cc, 0x4, 0x2001E924); + bcma_chipco_pll_write(cc, 0x5, 0x88888815); + } else { + bcma_chipco_pll_write(cc, 0x0, 0x11100060); + bcma_chipco_pll_write(cc, 0x1, 0x080c0c06); + bcma_chipco_pll_write(cc, 0x2, 0x03000000); + bcma_chipco_pll_write(cc, 0x3, 0x00000000); + bcma_chipco_pll_write(cc, 0x4, 0x200005c0); + bcma_chipco_pll_write(cc, 0x5, 0x88888815); + } + pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD | + BCMA_CC_PMU_CTL_NOILPONW; + } else if (dev->dev->chip_id == 0x4322 || + dev->dev->chip_id == 0x4340 || + dev->dev->chip_id == 0x4341) { + bcma_chipco_pll_write(cc, 0x0, 0x11100070); + bcma_chipco_pll_write(cc, 0x1, 0x1014140a); + bcma_chipco_pll_write(cc, 0x5, 0x88888854); + if (avoid) + bcma_chipco_pll_write(cc, 0x2, 0x05201828); + else + bcma_chipco_pll_write(cc, 0x2, 0x05001828); + pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; + } else { + return; + } + bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl); + 
break; +#endif +#ifdef CONFIG_B43_SSB + case B43_BUS_SSB: + /* FIXME */ + break; +#endif + } +} + /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ static void b43_nphy_channel_setup(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e, @@ -3935,6 +4272,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = dev->phy.n; + int ch = new_channel->hw_value; u16 old_band_5ghz; u32 tmp32; @@ -3974,8 +4312,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, b43_nphy_tx_lp_fbw(dev); - if (dev->phy.rev >= 3 && 0) { - /* TODO */ + if (dev->phy.rev >= 3 && + dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) { + bool avoid = false; + if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) { + avoid = true; + } else if (!b43_channel_type_is_40mhz(phy->channel_type)) { + if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14) + avoid = true; + } else { /* 40MHz */ + if (nphy->aband_spurwar_en && + (ch == 38 || ch == 102 || ch == 118)) + avoid = dev->dev->chip_id == 0x4716; + } + + b43_nphy_pmu_spur_avoid(dev, avoid); + + if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 || + dev->dev->chip_id == 43225) { + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, + avoid ? 0x5341 : 0x8889); + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8); + } + + if (dev->phy.rev == 3 || dev->phy.rev == 4) + ; /* TODO: reset PLL */ + + if (avoid) + b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX); + else + b43_phy_mask(dev, B43_NPHY_BBCFG, + ~B43_NPHY_BBCFG_RSTRX & 0xFFFF); + + b43_nphy_reset_cca(dev); + + /* wl sets useless phy_isspuravoid here */ } b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830); @@ -4039,6 +4410,10 @@ static int b43_nphy_set_channel(struct b43_wldev *dev, return 0; } +/************************************************** + * Basic PHY ops. + **************************************************/ + static int b43_nphy_op_allocate(struct b43_wldev *dev) { struct b43_phy_n *nphy; @@ -4055,10 +4430,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; + struct ssb_sprom *sprom = dev->dev->bus_sprom; memset(nphy, 0, sizeof(*nphy)); nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); + nphy->spur_avoid = (phy->rev >= 3) ? + B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; nphy->gain_boost = true; /* this way we follow wl, assume it is true */ nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ @@ -4067,6 +4445,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. 
*/ nphy->tx_pwr_idx[0] = 128; nphy->tx_pwr_idx[1] = 128; + + /* Hardware TX power control and 5GHz power gain */ + nphy->txpwrctrl = false; + nphy->pwg_gain_5ghz = false; + if (dev->phy.rev >= 3 || + (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && + (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) { + nphy->txpwrctrl = true; + nphy->pwg_gain_5ghz = true; + } else if (sprom->revision >= 4) { + if (dev->phy.rev >= 2 && + (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) { + nphy->txpwrctrl = true; +#ifdef CONFIG_B43_SSB + if (dev->dev->bus_type == B43_BUS_SSB && + dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) { + struct pci_dev *pdev = + dev->dev->sdev->bus->host_pci; + if (pdev->device == 0x4328 || + pdev->device == 0x432a) + nphy->pwg_gain_5ghz = true; + } +#endif + } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) { + nphy->pwg_gain_5ghz = true; + } + } + + if (dev->phy.rev >= 3) { + nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; + nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; + } } static void b43_nphy_op_free(struct b43_wldev *dev) diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h index fbf520285bd..5de8f74cc02 100644 --- a/drivers/net/wireless/b43/phy_n.h +++ b/drivers/net/wireless/b43/phy_n.h @@ -716,6 +716,12 @@ struct b43_wldev; +enum b43_nphy_spur_avoid { + B43_SPUR_AVOID_DISABLE, + B43_SPUR_AVOID_AUTO, + B43_SPUR_AVOID_FORCE, +}; + struct b43_chanspec { u16 center_freq; enum nl80211_channel_type channel_type; @@ -759,6 +765,11 @@ struct b43_phy_n_txpwrindex { u16 locomp; }; +struct b43_phy_n_pwr_ctl_info { + u8 idle_tssi_2g; + u8 idle_tssi_5g; +}; + struct b43_phy_n { u8 antsel_type; u8 cal_orig_pwr_idx[2]; @@ -785,12 +796,14 @@ struct b43_phy_n { u16 mphase_txcal_bestcoeffs[11]; bool txpwrctrl; + bool pwg_gain_5ghz; u8 tx_pwr_idx[2]; u16 adj_pwr_tbl[84]; u16 txcal_bbmult; u16 txiqlocal_bestc[11]; bool txiqlocal_coeffsvalid; struct b43_phy_n_txpwrindex txpwrindex[2]; + struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2]; struct b43_chanspec txiqlocal_chanspec; u8 txrx_chain; @@ -803,6 +816,7 @@ struct b43_phy_n { u16 classifier_state; u16 clip_state[2]; + enum b43_nphy_spur_avoid spur_avoid; bool aband_spurwar_en; bool gband_spurwar_en; diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index 279a53eae4c..3533ab86bd3 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) /* Not enough memory on the queue. */ err = -EBUSY; ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); - q->stopped = 1; + q->stopped = true; goto out; } @@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) (q->free_packet_slots == 0)) { /* The queue is full. 
*/ ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); - q->stopped = 1; + q->stopped = true; } out: @@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, if (q->stopped) { ieee80211_wake_queue(dev->wl->hw, q->queue_prio); - q->stopped = 0; + q->stopped = false; } } diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c index a01f776ca4d..ce037fb6789 100644 --- a/drivers/net/wireless/b43/radio_2056.c +++ b/drivers/net/wireless/b43/radio_2056.c @@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = { [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, }, [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, - [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, + [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, }, [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, }, - [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, }, + [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, }, [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, }, - [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, - [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, + [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, }, + [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, }, [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, - [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, }, [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, }, [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, @@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev, B2056_RX1, pts->rx, pts->rx_length); } +void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5) +{ + struct b2056_inittabs_pts *pts; + const struct b2056_inittab_entry *e; + + if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) { + B43_WARN_ON(1); + return; + } + pts = &b2056_inittabs[dev->phy.rev]; + e = &pts->syn[B2056_SYN_PLL_CP2]; + + b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2); +} + const struct b43_nphy_channeltab_entry_rev3 * b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq) { diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h index a7159d8578b..5b86673459f 100644 --- a/drivers/net/wireless/b43/radio_2056.h +++ b/drivers/net/wireless/b43/radio_2056.h @@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 { void b2056_upload_inittabs(struct b43_wldev *dev, bool ghz5, bool ignore_uploadflag); +void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5); /* Get the NPHY Channel Switch Table entry for a channel. * Returns NULL on failure to find an entry. 
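The new b2056_upload_syn_pll_cp2() above follows a recurring pattern in these radio tables: bounds-check the PHY revision against the init-table array, pick the entry, and write the band-appropriate half. The shape of that pattern stripped of the driver types (illustrative sketch with placeholder table data):

```c
#include <stddef.h>
#include <stdint.h>

struct inittab_entry { uint16_t ghz5, ghz2; };

/* One synthesizer init table per supported PHY revision (placeholder data;
 * the real tables live in radio_2056.c). */
static const struct inittab_entry syn_tab_by_rev[][2] = {
	{ { 0x0030, 0x0030 }, { 0x000d, 0x000d } },	/* rev 0, regs 0..1 */
	{ { 0x003f, 0x003f }, { 0x0006, 0x0006 } },	/* rev 1, regs 0..1 */
};

extern void radio_write(uint16_t reg, uint16_t val);	/* b43_radio_write stand-in */

/* Bounds-check the revision, pick the entry, write its 5 GHz or 2.4 GHz
 * value -- the b2056_upload_syn_pll_cp2() shape from the hunk above. */
static void upload_syn_value(size_t phy_rev, size_t reg, int ghz5)
{
	if (phy_rev >= sizeof(syn_tab_by_rev) / sizeof(syn_tab_by_rev[0]))
		return;				/* B43_WARN_ON() in the driver */

	const struct inittab_entry *e = &syn_tab_by_rev[phy_rev][reg];
	radio_write((uint16_t)reg, ghz5 ? e->ghz5 : e->ghz2);
}
```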
*/ diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 7b326f2efdc..f7def13524d 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = { 0x0000, 0x0000, }; +/* volatile tables, PHY revision >= 3 */ + +/* indexed by antswctl2g */ +static const u16 b43_ntab_antswctl2g_r3[4][32] = { + { + 0x0082, 0x0082, 0x0211, 0x0222, 0x0328, + 0x0000, 0x0000, 0x0000, 0x0144, 0x0000, + 0x0000, 0x0000, 0x0188, 0x0000, 0x0000, + 0x0000, 0x0082, 0x0082, 0x0211, 0x0222, + 0x0328, 0x0000, 0x0000, 0x0000, 0x0144, + 0x0000, 0x0000, 0x0000, 0x0188, 0x0000, + 0x0000, 0x0000, + }, + { + 0x0022, 0x0022, 0x0011, 0x0022, 0x0022, + 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, + 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, + 0x0000, 0x0022, 0x0022, 0x0011, 0x0022, + 0x0022, 0x0000, 0x0000, 0x0000, 0x0011, + 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, + 0x0000, 0x0000, + }, + { + 0x0088, 0x0088, 0x0044, 0x0088, 0x0088, + 0x0000, 0x0000, 0x0000, 0x0044, 0x0000, + 0x0000, 0x0000, 0x0088, 0x0000, 0x0000, + 0x0000, 0x0088, 0x0088, 0x0044, 0x0088, + 0x0088, 0x0000, 0x0000, 0x0000, 0x0044, + 0x0000, 0x0000, 0x0000, 0x0088, 0x0000, + 0x0000, 0x0000, + }, + { + 0x0022, 0x0022, 0x0011, 0x0022, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, + 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, + 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, + 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, + 0x0000, 0x03cc, + } +}; + /* TX gain tables */ const u32 b43_ntab_tx_gain_rev0_1_2[] = { 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, @@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { const s16 tbl_tx_filter_coef_rev4[7][15] = { { -377, 137, -407, 208, -1527, 956, 93, 186, 93, 230, - -44, 230, 20, -191, 201 }, + -44, 230, 201, -191, 201 }, { -77, 20, -98, 49, -93, 60, 56, 111, 56, 26, -5, 26, 34, -32, 34 }, @@ -2710,7 +2752,18 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = { { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ }; -struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { +struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { + { 10, 14, 19, 27 }, + { -5, 6, 10, 15 }, + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, + 0x427E, + { 0x413F, 0x413F, 0x413F, 0x413F }, + 0x007E, 0x0066, 0x1074, + 0x18, 0x18, 0x18, + 0x01D0, 0x5, +}; +struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { { /* 2GHz */ { /* PHY rev 3 */ { 7, 11, 16, 23 }, @@ -2734,15 +2787,26 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { 0x18, 0x18, 0x18, 0x01A1, 0x5, }, - { /* PHY rev 5+ */ + { /* PHY rev 5 */ { 9, 13, 18, 26 }, { -3, 7, 11, 16 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x427E, /* invalid for external LNA! */ { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */ - 0x1076, 0x0066, 0x106A, - 0xC, 0xC, 0xC, + 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */ + 0x18, 0x18, 0x18, + 0x01D0, 0x9, + }, + { /* PHY rev 6+ */ + { 8, 13, 18, 25 }, + { -5, 6, 10, 14 }, + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, + 0x527E, /* invalid for external LNA! */ + { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! 
*/ + 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */ + 0x18, 0x18, 0x18, 0x01D0, 0x5, }, }, @@ -2769,7 +2833,7 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { 0x24, 0x24, 0x24, 0x0107, 25, }, - { /* PHY rev 5+ */ + { /* PHY rev 5 */ { 6, 10, 16, 21 }, { -7, 0, 4, 8 }, { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, @@ -2780,6 +2844,17 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { 0x24, 0x24, 0x24, 0x00A9, 25, }, + { /* PHY rev 6+ */ + { 6, 10, 16, 21 }, + { -7, 0, 4, 8 }, + { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, + { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, + 0x729E, + { 0x714F, 0x714F, 0x714F, 0x714F }, + 0x029E, 0x2084, 0x2086, + 0x24, 0x24, 0x24, /* low is invalid for radio rev 11! */ + 0x00F0, 25, + }, }, }; @@ -2838,9 +2913,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset) break; case B43_NTAB_32BIT: b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); - value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); - value <<= 16; - value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16; break; default: B43_WARN_ON(1); @@ -2864,6 +2938,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); for (i = 0; i < nr_elements; i++) { + /* Auto increment broken + caching issue on BCM43224? */ + if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) { + b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i); + } + switch (type) { case B43_NTAB_8BIT: *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF; @@ -2874,9 +2954,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, data += 2; break; case B43_NTAB_32BIT: - *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); - *((u32 *)data) <<= 16; - *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + *((u32 *)data) = + b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + *((u32 *)data) |= + b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16; data += 4; break; default: @@ -2932,6 +3013,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); for (i = 0; i < nr_elements; i++) { + /* Auto increment broken + caching issue on BCM43224? 
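Two things change in the N-PHY table accessors above: 32-bit reads are now assembled low half first, and on BCM43224 rev 1 the bulk helpers re-seed the table address before every element because auto-increment is not reliable there. A standalone sketch of both ideas against a hypothetical pair of 16-bit data registers (register numbers and accessor names are placeholders):

```c
#include <stdint.h>

/* Hypothetical accessors standing in for b43_phy_read()/b43_phy_write(). */
extern uint16_t phy_read(uint16_t reg);
extern void phy_write(uint16_t reg, uint16_t val);

#define TABLE_ADDR   0x72	/* placeholder register numbers */
#define TABLE_DATALO 0x73
#define TABLE_DATAHI 0x74

/* Assemble a 32-bit table word from two 16-bit halves, low half first. */
static uint32_t table_read32(uint32_t offset)
{
	uint32_t value;

	phy_write(TABLE_ADDR, (uint16_t)offset);
	value = phy_read(TABLE_DATALO);
	value |= (uint32_t)phy_read(TABLE_DATAHI) << 16;
	return value;
}

/* Bulk read that re-seeds the address on each iteration when auto-increment
 * cannot be trusted (the BCM43224 rev 1 case in the hunk above). */
static void table_read32_bulk(uint32_t offset, uint32_t *buf, unsigned int n,
			      int autoinc_broken)
{
	unsigned int i;

	phy_write(TABLE_ADDR, (uint16_t)offset);
	for (i = 0; i < n; i++) {
		if (autoinc_broken) {
			phy_read(TABLE_DATALO);	/* flush any cached value */
			phy_write(TABLE_ADDR, (uint16_t)(offset + i));
		}
		buf[i] = phy_read(TABLE_DATALO);
		buf[i] |= (uint32_t)phy_read(TABLE_DATAHI) << 16;
	}
}
```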
*/ + if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 && + dev->dev->chip_rev == 1) { + b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i); + } + switch (type) { case B43_NTAB_8BIT: value = *data; @@ -2999,6 +3087,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) } while (0) void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) { + struct ssb_sprom *sprom = dev->dev->bus_sprom; + /* Static tables */ ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3); ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3); @@ -3029,7 +3119,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3); /* Volatile tables */ - /* TODO */ + if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3)) + ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3, + b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]); + else + B43_WARN_ON(1); } struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( @@ -3037,26 +3131,67 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( { struct nphy_gain_ctl_workaround_entry *e; u8 phy_idx; + u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso : + dev->dev->bus_sprom->fem.ghz2.tr_iso; + + if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11) + return &nphy_gain_ctl_wa_phy6_radio11_ghz2; B43_WARN_ON(dev->phy.rev < 3); - if (dev->phy.rev >= 5) + if (dev->phy.rev >= 6) + phy_idx = 3; + else if (dev->phy.rev == 5) phy_idx = 2; else if (dev->phy.rev == 4) phy_idx = 1; else phy_idx = 0; - e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; - /* Only one entry differs for external LNA, so instead making whole - * table 2 times bigger, hack is here - */ - if (!ghz5 && dev->phy.rev >= 5 && ext_lna) { - e->rfseq_init[0] &= 0x0FFF; - e->rfseq_init[1] &= 0x0FFF; - e->rfseq_init[2] &= 0x0FFF; - e->rfseq_init[3] &= 0x0FFF; - e->init_gain &= 0x0FFF; + /* Some workarounds to the workarounds... 
*/ + if (ghz5 && dev->phy.rev >= 6) { + if (dev->phy.radio_rev == 11 && + !b43_channel_type_is_40mhz(dev->phy.channel_type)) + e->cliplo_gain = 0x2d; + } else if (!ghz5 && dev->phy.rev >= 5) { + if (ext_lna) { + e->rfseq_init[0] &= ~0x4000; + e->rfseq_init[1] &= ~0x4000; + e->rfseq_init[2] &= ~0x4000; + e->rfseq_init[3] &= ~0x4000; + e->init_gain &= ~0x4000; + } + switch (tr_iso) { + case 0: + e->cliplo_gain = 0x0062; + case 1: + e->cliplo_gain = 0x0064; + case 2: + e->cliplo_gain = 0x006a; + case 3: + e->cliplo_gain = 0x106a; + case 4: + e->cliplo_gain = 0x106c; + case 5: + e->cliplo_gain = 0x1074; + case 6: + e->cliplo_gain = 0x107c; + case 7: + e->cliplo_gain = 0x207c; + default: + e->cliplo_gain = 0x106a; + } + } else if (ghz5 && dev->phy.rev == 4 && ext_lna) { + e->rfseq_init[0] &= ~0x4000; + e->rfseq_init[1] &= ~0x4000; + e->rfseq_init[2] &= ~0x4000; + e->rfseq_init[3] &= ~0x4000; + e->init_gain &= ~0x4000; + e->rfseq_init[0] |= 0x1000; + e->rfseq_init[1] |= 0x1000; + e->rfseq_init[2] |= 0x1000; + e->rfseq_init[3] |= 0x1000; + e->init_gain |= 0x1000; } return e; diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h index a81696bff0e..97038c48193 100644 --- a/drivers/net/wireless/b43/tables_nphy.h +++ b/drivers/net/wireless/b43/tables_nphy.h @@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( #define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ #define B43_NTAB_C1_LOFEEDTH_SIZE 128 +/* Volatile N-PHY tables, PHY revision >= 3 */ +#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */ + /* Static N-PHY tables, PHY revision >= 3 */ -#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */ -#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */ -#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */ -#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */ -#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */ -#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */ +#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */ +#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */ +#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */ +#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */ +#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */ +#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */ #define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */ -#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */ +#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */ #define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */ #define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */ #define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */ #define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */ -#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */ -#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */ -#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */ -#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */ -#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */ -#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */ -#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */ +#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */ +#define B43_NTAB_CHANEST_R3 
B43_NTAB32(22, 0) /* channel estimate */ +#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */ +#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */ +#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */ +#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */ +#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */ #define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */ #define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */ #define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */ diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index 5f77cbe0b6a..2c5367884b3 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev, struct ieee80211_tx_info *report, const struct b43_txstatus *status) { - bool frame_success = 1; + bool frame_success = true; int retry_limit; /* preserve the confiured retry limit before clearing the status @@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev, /* The frame was not ACKed... */ if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ...but we expected an ACK. */ - frame_success = 0; + frame_success = false; } } if (status->frame_count == 0) { diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h index 1d4fc9db7f5..98e3d44400c 100644 --- a/drivers/net/wireless/b43legacy/b43legacy.h +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -560,8 +560,16 @@ struct b43legacy_key { u8 algorithm; }; +#define B43legacy_QOS_QUEUE_NUM 4 + struct b43legacy_wldev; +/* QOS parameters for a queue. */ +struct b43legacy_qos_params { + /* The QOS parameters */ + struct ieee80211_tx_queue_params p; +}; + /* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */ struct b43legacy_wl { /* Pointer to the active wireless device on this chip */ @@ -611,6 +619,18 @@ struct b43legacy_wl { bool beacon1_uploaded; bool beacon_templates_virgin; /* Never wrote the templates? */ struct work_struct beacon_update_trigger; + /* The current QOS parameters for the 4 queues. */ + struct b43legacy_qos_params qos_params[B43legacy_QOS_QUEUE_NUM]; + + /* Packet transmit work */ + struct work_struct tx_work; + + /* Queue of packets to be transmitted. */ + struct sk_buff_head tx_queue[B43legacy_QOS_QUEUE_NUM]; + + /* Flag that implement the queues stopping. */ + bool tx_queue_stopped[B43legacy_QOS_QUEUE_NUM]; + }; /* Pointers to the firmware data and meta information about it. 
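In the gain-control workaround hunk above (tables_nphy.c), each SPROM tr_iso value selects a cliplo_gain. The case-by-case values imply a one-to-one mapping, which is easier to read as a lookup table; note that the switch as added carries no break statements, so in C every matched case falls through to the later assignments. Editorial sketch, values copied from the cases above:

```c
#include <stdint.h>

/* cliplo_gain per tr_iso value, as listed case-by-case above
 * (tr_iso 0..7; out-of-range values use the default entry). */
static const uint16_t cliplo_gain_by_tr_iso[8] = {
	0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c,
};

static uint16_t cliplo_gain_for(uint8_t tr_iso)
{
	if (tr_iso >= 8)
		return 0x106a;		/* the default case above */
	return cliplo_gain_by_tr_iso[tr_iso];
}
```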
*/ diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index c5535adf699..f1f8bd09bd8 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); ring->index = controller_index; if (for_tx) { - ring->tx = 1; + ring->tx = true; ring->current_slot = -1; } else { if (ring->index == 0) { @@ -727,7 +727,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, } else B43legacy_WARN_ON(1); } - spin_lock_init(&ring->lock); #ifdef CONFIG_B43LEGACY_DEBUG ring->last_injected_overflow = jiffies; #endif @@ -806,7 +805,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev) static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) { u64 orig_mask = mask; - bool fallback = 0; + bool fallback = false; int err; /* Try to set the DMA mask. If it fails, try falling back to a @@ -820,12 +819,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); - fallback = 1; + fallback = true; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); - fallback = 1; + fallback = true; continue; } b43legacyerr(dev->wl, "The machine/kernel does not support " @@ -858,7 +857,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev) #ifdef CONFIG_B43LEGACY_PIO b43legacywarn(dev->wl, "DMA for this device not supported. " "Falling back to PIO\n"); - dev->__using_pio = 1; + dev->__using_pio = true; return -EAGAIN; #else b43legacyerr(dev->wl, "DMA for this device not supported and " @@ -1068,7 +1067,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, memset(meta, 0, sizeof(*meta)); meta->skb = skb; - meta->is_last_fragment = 1; + meta->is_last_fragment = true; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. */ @@ -1144,10 +1143,8 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, { struct b43legacy_dmaring *ring; int err = 0; - unsigned long flags; ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); - spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); if (unlikely(ring->stopped)) { @@ -1157,16 +1154,14 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, * For now, just refuse the transmit. */ if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacyerr(dev->wl, "Packet after queue stopped\n"); - err = -ENOSPC; - goto out_unlock; + return -ENOSPC; } if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43legacyerr(dev->wl, "DMA queue overflow\n"); - err = -ENOSPC; - goto out_unlock; + return -ENOSPC; } /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing @@ -1176,25 +1171,23 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ dev_kfree_skb_any(skb); - err = 0; - goto out_unlock; + return 0; } if (unlikely(err)) { b43legacyerr(dev->wl, "DMA tx mapping failure\n"); - goto out_unlock; + return err; } if ((free_slots(ring) < SLOTS_PER_PACKET) || should_inject_overflow(ring)) { /* This TX ring is full. 
*/ - ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); - ring->stopped = 1; + unsigned int skb_mapping = skb_get_queue_mapping(skb); + ieee80211_stop_queue(dev->wl->hw, skb_mapping); + dev->wl->tx_queue_stopped[skb_mapping] = 1; + ring->stopped = true; if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(dev->wl, "Stopped TX ring %d\n", ring->index); } -out_unlock: - spin_unlock_irqrestore(&ring->lock, flags); - return err; } @@ -1205,14 +1198,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, struct b43legacy_dmadesc_meta *meta; int retry_limit; int slot; + int firstused; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) return; - B43legacy_WARN_ON(!irqs_disabled()); - spin_lock(&ring->lock); - B43legacy_WARN_ON(!ring->tx); + + /* Sanity check: TX packets are processed in-order on one ring. + * Check if the slot deduced from the cookie really is the first + * used slot. */ + firstused = ring->current_slot - ring->used_slots + 1; + if (firstused < 0) + firstused = ring->nr_slots + firstused; + if (unlikely(slot != firstused)) { + /* This possibly is a firmware bug and will result in + * malfunction, memory leaks and/or stall of DMA functionality. + */ + b43legacydbg(dev->wl, "Out of order TX status report on DMA " + "ring %d. Expected %d, but got %d\n", + ring->index, firstused, slot); + return; + } + while (1) { B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); op32_idx2desc(ring, slot, &meta); @@ -1285,14 +1293,21 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, dev->stats.last_tx = jiffies; if (ring->stopped) { B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); - ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); - ring->stopped = 0; + ring->stopped = false; + } + + if (dev->wl->tx_queue_stopped[ring->queue_prio]) { + dev->wl->tx_queue_stopped[ring->queue_prio] = 0; + } else { + /* If the driver queue is running wake the corresponding + * mac80211 queue. */ + ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(dev->wl, "Woke up TX ring %d\n", - ring->index); + ring->index); } - - spin_unlock(&ring->lock); + /* Add work to the queue. */ + ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work); } static void dma_rx(struct b43legacy_dmaring *ring, @@ -1415,22 +1430,14 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring) static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) { - unsigned long flags; - - spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); op32_tx_suspend(ring); - spin_unlock_irqrestore(&ring->lock, flags); } static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) { - unsigned long flags; - - spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); op32_tx_resume(ring); - spin_unlock_irqrestore(&ring->lock, flags); } void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h index 504a58767e9..c3282f906bc 100644 --- a/drivers/net/wireless/b43legacy/dma.h +++ b/drivers/net/wireless/b43legacy/dma.h @@ -150,8 +150,9 @@ struct b43legacy_dmaring { enum b43legacy_dmatype type; /* Boolean. Is this ring stopped at ieee80211 level? */ bool stopped; - /* Lock, only used for TX. */ - spinlock_t lock; + /* The QOS priority assigned to this ring. Only used for TX rings. + * This is the mac80211 "queue" value. 
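The new sanity check above derives the oldest in-flight slot from the ring bookkeeping and rejects TX status reports that do not match it. The index arithmetic has to wrap around the ring; a self-contained version of the same calculation with a small usage example (names editorial):

```c
#include <assert.h>

/* Oldest used slot in a ring: one past current_slot - used_slots, wrapped
 * into [0, nr_slots).  Same arithmetic as the hunk above. */
static int ring_first_used(int current_slot, int used_slots, int nr_slots)
{
	int firstused = current_slot - used_slots + 1;

	if (firstused < 0)
		firstused += nr_slots;
	return firstused;
}

int main(void)
{
	/* 128-slot ring, newest used slot is 2, five packets in flight:
	 * the oldest one sits at slot 126 (wrapped around the end). */
	assert(ring_first_used(2, 5, 128) == 126);
	/* No wraparound needed. */
	assert(ring_first_used(10, 3, 128) == 8);
	return 0;
}
```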
*/ + u8 queue_prio; struct b43legacy_wldev *dev; #ifdef CONFIG_B43LEGACY_DEBUG /* Maximum number of used slots. */ diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c index 2f1bfdc44f9..fd4565389c7 100644 --- a/drivers/net/wireless/b43legacy/leds.c +++ b/drivers/net/wireless/b43legacy/leds.c @@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev) if (sprom[i] == 0xFF) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. */ - activelow = 0; + activelow = false; switch (i) { case 0: behaviour = B43legacy_LED_ACTIVITY; - activelow = 1; + activelow = true; if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) behaviour = B43legacy_LED_RADIO_ALL; break; diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 20f02437af8..75e70bce40f 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags) macctl &= ~B43legacy_MACCTL_GMODE; if (flags & B43legacy_TMSLOW_GMODE) { macctl |= B43legacy_MACCTL_GMODE; - dev->phy.gmode = 1; + dev->phy.gmode = true; } else - dev->phy.gmode = 0; + dev->phy.gmode = false; macctl |= B43legacy_MACCTL_IHR_ENABLED; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); } @@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev) if (dev->noisecalc.calculation_running) return; dev->noisecalc.channel_at_start = dev->phy.channel; - dev->noisecalc.calculation_running = 1; + dev->noisecalc.calculation_running = true; dev->noisecalc.nr_samples = 0; b43legacy_generate_noise_sample(dev); @@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev) dev->stats.link_noise = average; drop_calculation: - dev->noisecalc.calculation_running = 0; + dev->noisecalc.calculation_running = false; return; } generate_new: @@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) b43legacy_power_saving_ctl_bits(dev, -1, -1); } if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) - dev->dfq_valid = 1; + dev->dfq_valid = true; } static void handle_irq_atim_end(struct b43legacy_wldev *dev) @@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev) b43legacy_write32(dev, B43legacy_MMIO_MACCMD, b43legacy_read32(dev, B43legacy_MMIO_MACCMD) | B43legacy_MACCMD_DFQ_VALID); - dev->dfq_valid = 0; + dev->dfq_valid = false; } } @@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, unsigned int i, len, variable_len; const struct ieee80211_mgmt *bcn; const u8 *ie; - bool tim_found = 0; + bool tim_found = false; unsigned int rate; u16 ctl; int antenna; @@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, /* A valid TIM is at least 4 bytes long. */ if (ie_len < 4) break; - tim_found = 1; + tim_found = true; tim_position = sizeof(struct b43legacy_plcp_hdr6); tim_position += offsetof(struct ieee80211_mgmt, @@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev) * but we don't use that feature anyway. 
*/ b43legacy_write_probe_resp_template(dev, 0x268, 0x4A, &__b43legacy_ratetable[3]); - wl->beacon0_uploaded = 1; + wl->beacon0_uploaded = true; } static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev) @@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev) if (wl->beacon1_uploaded) return; b43legacy_write_beacon_template(dev, 0x468, 0x1A); - wl->beacon1_uploaded = 1; + wl->beacon1_uploaded = true; } static void handle_irq_beacon(struct b43legacy_wldev *dev) @@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev) if (unlikely(wl->beacon_templates_virgin)) { /* We never uploaded a beacon before. * Upload both templates now, but only mark one valid. */ - wl->beacon_templates_virgin = 0; + wl->beacon_templates_virgin = false; b43legacy_upload_beacon0(dev); b43legacy_upload_beacon1(dev); cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); @@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl) if (wl->current_beacon) dev_kfree_skb_any(wl->current_beacon); wl->current_beacon = beacon; - wl->beacon0_uploaded = 0; - wl->beacon1_uploaded = 0; + wl->beacon0_uploaded = false; + wl->beacon1_uploaded = false; ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); } @@ -2440,30 +2440,64 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl) return err; } +static void b43legacy_tx_work(struct work_struct *work) +{ + struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl, + tx_work); + struct b43legacy_wldev *dev; + struct sk_buff *skb; + int queue_num; + int err = 0; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) { + mutex_unlock(&wl->mutex); + return; + } + + for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { + while (skb_queue_len(&wl->tx_queue[queue_num])) { + skb = skb_dequeue(&wl->tx_queue[queue_num]); + if (b43legacy_using_pio(dev)) + err = b43legacy_pio_tx(dev, skb); + else + err = b43legacy_dma_tx(dev, skb); + if (err == -ENOSPC) { + wl->tx_queue_stopped[queue_num] = 1; + ieee80211_stop_queue(wl->hw, queue_num); + skb_queue_head(&wl->tx_queue[queue_num], skb); + break; + } + if (unlikely(err)) + dev_kfree_skb(skb); /* Drop it */ + err = 0; + } + + if (!err) + wl->tx_queue_stopped[queue_num] = 0; + } + + mutex_unlock(&wl->mutex); +} + static void b43legacy_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); - struct b43legacy_wldev *dev = wl->current_dev; - int err = -ENODEV; - unsigned long flags; - if (unlikely(!dev)) - goto out; - if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED)) - goto out; - /* DMA-TX is done without a global lock. */ - if (b43legacy_using_pio(dev)) { - spin_lock_irqsave(&wl->irq_lock, flags); - err = b43legacy_pio_tx(dev, skb); - spin_unlock_irqrestore(&wl->irq_lock, flags); - } else - err = b43legacy_dma_tx(dev, skb); -out: - if (unlikely(err)) { - /* Drop the packet. */ + if (unlikely(skb->len < 2 + 2 + 6)) { + /* Too short, this can't be a valid frame. 
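b43legacy_op_tx() now only queues the frame and defers submission to b43legacy_tx_work(), which drains the per-priority lists and stops a queue when the low-level path returns -ENOSPC, putting the frame back at the head. The control flow of that pattern, reduced to plain C with hypothetical frame/queue types and a submit callback (a sketch of the scheme, not the driver's code):

```c
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct frame { struct frame *next; };

struct txq {
	struct frame *head, *tail;
	bool stopped;			/* mirrors wl->tx_queue_stopped[] */
};

/* Drain one queue, stopping it (and re-queueing the frame) when the
 * low-level submit path reports that it is out of descriptors. */
static void drain_queue(struct txq *q,
			int (*submit)(struct frame *f),	/* DMA or PIO path */
			void (*stop_hw_queue)(void))
{
	while (q->head) {
		struct frame *f = q->head;
		int err;

		q->head = f->next;
		if (!q->head)
			q->tail = NULL;

		err = submit(f);
		if (err == -ENOSPC) {
			/* Put the frame back at the head and stop the queue,
			 * as b43legacy_tx_work() does above. */
			f->next = q->head;
			q->head = f;
			if (!q->tail)
				q->tail = f;
			q->stopped = true;
			stop_hw_queue();
			return;
		}
		/* Other unlikely errors: the code above frees the frame
		 * and keeps draining. */
	}
	q->stopped = false;
}
```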
*/ dev_kfree_skb_any(skb); + return; } + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags); + + skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb); + if (!wl->tx_queue_stopped[skb->queue_mapping]) + ieee80211_queue_work(wl->hw, &wl->tx_work); + else + ieee80211_stop_queue(wl->hw, skb->queue_mapping); } static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, @@ -2510,7 +2544,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl, if (d->phy.possible_phymodes & phymode) { /* Ok, this device supports the PHY-mode. * Set the gmode bit. */ - *gmode = 1; + *gmode = true; *dev = d; return 0; @@ -2546,7 +2580,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl, struct b43legacy_wldev *uninitialized_var(up_dev); struct b43legacy_wldev *down_dev; int err; - bool gmode = 0; + bool gmode = false; int prev_status; err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); @@ -2879,6 +2913,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) { struct b43legacy_wl *wl = dev->wl; unsigned long flags; + int queue_num; if (b43legacy_status(dev) < B43legacy_STAT_STARTED) return; @@ -2898,11 +2933,16 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) /* Must unlock as it would otherwise deadlock. No races here. * Cancel the possibly running self-rearming periodic work. */ cancel_delayed_work_sync(&dev->periodic_work); + cancel_work_sync(&wl->tx_work); mutex_lock(&wl->mutex); - ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */ + /* Drain all TX queues. */ + for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { + while (skb_queue_len(&wl->tx_queue[queue_num])) + dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); + } - b43legacy_mac_suspend(dev); +b43legacy_mac_suspend(dev); free_irq(dev->dev->irq, dev); b43legacydbg(wl, "Wireless interface stopped\n"); } @@ -3044,12 +3084,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, /* Assume the radio is enabled. If it's not enabled, the state will * immediately get fixed on the first periodic work run. 
*/ - dev->radio_hw_enable = 1; + dev->radio_hw_enable = true; phy->savedpctlreg = 0xFFFF; - phy->aci_enable = 0; - phy->aci_wlan_automatic = 0; - phy->aci_hw_rssi = 0; + phy->aci_enable = false; + phy->aci_wlan_automatic = false; + phy->aci_hw_rssi = false; lo = phy->_lo_pairs; if (lo) @@ -3081,7 +3121,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev) { /* Flags */ - dev->dfq_valid = 0; + dev->dfq_valid = false; /* Stats */ memset(&dev->stats, 0, sizeof(dev->stats)); @@ -3187,9 +3227,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev) phy->lofcal = 0xFFFF; phy->initval = 0xFFFF; - phy->aci_enable = 0; - phy->aci_wlan_automatic = 0; - phy->aci_hw_rssi = 0; + phy->aci_enable = false; + phy->aci_wlan_automatic = false; + phy->aci_hw_rssi = false; phy->antenna_diversity = 0xFFFF; memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); @@ -3355,7 +3395,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw, b43legacydbg(wl, "Adding Interface type %d\n", vif->type); dev = wl->current_dev; - wl->operating = 1; + wl->operating = true; wl->vif = vif; wl->if_type = vif->type; memcpy(wl->mac_addr, vif->addr, ETH_ALEN); @@ -3389,7 +3429,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, B43legacy_WARN_ON(wl->vif != vif); wl->vif = NULL; - wl->operating = 0; + wl->operating = false; spin_lock_irqsave(&wl->irq_lock, flags); b43legacy_adjust_opmode(dev); @@ -3413,10 +3453,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) memset(wl->bssid, 0, ETH_ALEN); memset(wl->mac_addr, 0, ETH_ALEN); wl->filter_flags = 0; - wl->beacon0_uploaded = 0; - wl->beacon1_uploaded = 0; - wl->beacon_templates_virgin = 1; - wl->radio_enabled = 1; + wl->beacon0_uploaded = false; + wl->beacon1_uploaded = false; + wl->beacon_templates_virgin = true; + wl->radio_enabled = true; mutex_lock(&wl->mutex); @@ -3455,7 +3495,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw) if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) b43legacy_wireless_core_stop(dev); b43legacy_wireless_core_exit(dev); - wl->radio_enabled = 0; + wl->radio_enabled = false; mutex_unlock(&wl->mutex); } @@ -3614,7 +3654,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev) have_bphy = 1; dev->phy.gmode = (have_gphy || have_bphy); - dev->phy.radio_on = 1; + dev->phy.radio_on = true; tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; b43legacy_wireless_core_reset(dev, tmp); @@ -3705,7 +3745,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev, (void (*)(unsigned long))b43legacy_interrupt_tasklet, (unsigned long)wldev); if (modparam_pio) - wldev->__using_pio = 1; + wldev->__using_pio = true; INIT_LIST_HEAD(&wldev->list); err = b43legacy_wireless_core_attach(wldev); @@ -3748,6 +3788,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) struct ieee80211_hw *hw; struct b43legacy_wl *wl; int err = -ENOMEM; + int queue_num; b43legacy_sprom_fixup(dev->bus); @@ -3782,6 +3823,13 @@ static int b43legacy_wireless_init(struct ssb_device *dev) mutex_init(&wl->mutex); INIT_LIST_HEAD(&wl->devlist); INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work); + INIT_WORK(&wl->tx_work, b43legacy_tx_work); + + /* Initialize queues and flags. 
*/ + for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { + skb_queue_head_init(&wl->tx_queue[queue_num]); + wl->tx_queue_stopped[queue_num] = 0; + } ssb_set_devtypedata(dev, wl); b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n", diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c index 475eb14e665..fcbafcd603c 100644 --- a/drivers/net/wireless/b43legacy/radio.c +++ b/drivers/net/wireless/b43legacy/radio.c @@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev, if (b43legacy_phy_read(dev, 0x0033) & 0x0800) break; - phy->aci_enable = 1; + phy->aci_enable = true; phy_stacksave(B43legacy_PHY_RADIO_BITFIELD); phy_stacksave(B43legacy_PHY_G_CRS); @@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev, if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800)) break; - phy->aci_enable = 0; + phy->aci_enable = false; phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD); phy_stackrestore(B43legacy_PHY_G_CRS); @@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, (phy->rev == 0) || (!phy->gmode)) return -ENODEV; - phy->aci_wlan_automatic = 0; + phy->aci_wlan_automatic = false; switch (mode) { case B43legacy_RADIO_INTERFMODE_AUTOWLAN: - phy->aci_wlan_automatic = 1; + phy->aci_wlan_automatic = true; if (phy->aci_enable) mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN; else @@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, currentmode); if (mode == B43legacy_RADIO_INTERFMODE_NONE) { - phy->aci_enable = 0; - phy->aci_hw_rssi = 0; + phy->aci_enable = false; + phy->aci_hw_rssi = false; } else b43legacy_radio_interference_mitigation_enable(dev, mode); phy->interfmode = mode; @@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) phy->radio_off_context.rfover); b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, phy->radio_off_context.rfoverval); - phy->radio_off_context.valid = 0; + phy->radio_off_context.valid = false; } channel = phy->channel; err = b43legacy_radio_selectchannel(dev, @@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) default: B43legacy_BUG_ON(1); } - phy->radio_on = 1; + phy->radio_on = true; } void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force) @@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force) if (!force) { phy->radio_off_context.rfover = rfover; phy->radio_off_context.rfoverval = rfoverval; - phy->radio_off_context.valid = 1; + phy->radio_off_context.valid = true; } b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C); b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, rfoverval & 0xFF73); } else b43legacy_phy_write(dev, 0x0015, 0xAA00); - phy->radio_on = 0; + phy->radio_on = false; b43legacydbg(dev->wl, "Radio initialized\n"); } diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index 2069fc8f7ad..cd6375de2a6 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -3,9 +3,8 @@ config BRCMUTIL config BRCMSMAC tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" - depends on PCI depends on MAC80211 - depends on BCMA=n + depends on BCMA select BRCMUTIL select FW_LOADER select CRC_CCITT @@ -18,16 +17,26 @@ config BRCMSMAC config BRCMFMAC tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver" - depends on MMC depends on CFG80211 select 
BRCMUTIL - select FW_LOADER ---help--- This module adds support for embedded wireless adapters based on - Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's - wireless extensions subsystem. If you choose to build a module, + Broadcom IEEE802.11n FullMAC chipsets. It has to work with at least + one of the bus interface support. If you choose to build a module, it'll be called brcmfmac.ko. +config BRCMFMAC_SDIO + bool "SDIO bus interface support for FullMAC" + depends on MMC + depends on BRCMFMAC + select FW_LOADER + default y + ---help--- + This option enables the SDIO bus interface support for Broadcom + FullMAC WLAN driver. + Say Y if you want to use brcmfmac for a compatible SDIO interface + wireless card. + config BRCMDBG bool "Broadcom driver debug functions" depends on BRCMSMAC || BRCMFMAC diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index b44e3094588..9ca9ea1135e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -19,15 +19,16 @@ ccflags-y += \ -Idrivers/net/wireless/brcm80211/brcmfmac \ -Idrivers/net/wireless/brcm80211/include -DHDOFILES = \ - wl_cfg80211.o \ - dhd_cdc.o \ - dhd_common.o \ - dhd_sdio.o \ - dhd_linux.o \ - bcmsdh.o \ - bcmsdh_sdmmc.o - obj-$(CONFIG_BRCMFMAC) += brcmfmac.o -brcmfmac-objs += $(DHDOFILES) +brcmfmac-objs += \ + wl_cfg80211.o \ + dhd_cdc.o \ + dhd_common.o \ + dhd_linux.o +brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ + dhd_sdio.o \ + bcmsdh.o \ + bcmsdh_sdmmc.o \ + sdio_chip.o + ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h deleted file mode 100644 index d7d3afd5a10..00000000000 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2011 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef _bcmchip_h_ -#define _bcmchip_h_ - -/* bcm4329 */ -/* SDIO device core, ID 0x829 */ -#define BCM4329_CORE_BUS_BASE 0x18011000 -/* internal memory core, ID 0x80e */ -#define BCM4329_CORE_SOCRAM_BASE 0x18003000 -/* ARM Cortex M3 core, ID 0x82a */ -#define BCM4329_CORE_ARM_BASE 0x18002000 -#define BCM4329_RAMSIZE 0x48000 -/* firmware name */ -#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin" -#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt" - -#endif /* _bcmchip_h_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 89ff94da556..4bc8d251acf 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -31,7 +31,6 @@ #include <brcmu_utils.h> #include <brcmu_wifi.h> #include <soc.h> -#include "dhd.h" #include "dhd_bus.h" #include "dhd_dbg.h" #include "sdio_host.h" @@ -51,12 +50,18 @@ static void brcmf_sdioh_irqhandler(struct sdio_func *func) sdio_claim_host(func); } +/* dummy handler for SDIO function 2 interrupt */ +static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func) +{ +} + int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev) { brcmf_dbg(TRACE, "Entering\n"); sdio_claim_host(sdiodev->func[1]); sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler); + sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler); sdio_release_host(sdiodev->func[1]); return 0; @@ -67,6 +72,7 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev) brcmf_dbg(TRACE, "Entering\n"); sdio_claim_host(sdiodev->func[1]); + sdio_release_irq(sdiodev->func[2]); sdio_release_irq(sdiodev->func[1]); sdio_release_host(sdiodev->func[1]); @@ -222,19 +228,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev) return sdiodev->regfail; } -int -brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, - u8 *buf, uint nbytes, struct sk_buff *pkt) +static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn, + uint flags, uint width, u32 *addr) { - int status; - uint incr_fix; - uint width; - uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; - brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); - /* Async not implemented yet */ if (flags & SDIO_REQ_ASYNC) return -ENOTSUPP; @@ -247,29 +246,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, sdiodev->sbwad = bar0; } - addr &= SBSDIO_SB_OFT_ADDR_MASK; + *addr &= SBSDIO_SB_OFT_ADDR_MASK; + + if (width == 4) + *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + return 0; +} + +int +brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, u8 *buf, uint nbytes) +{ + struct sk_buff *mypkt; + int err; + + mypkt = brcmu_pkt_buf_get_skb(nbytes); + if (!mypkt) { + brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", + nbytes); + return -EIO; + } + + err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt); + if (!err) + memcpy(buf, mypkt->data, nbytes); + + brcmu_pkt_buf_free_skb(mypkt); + return err; +} + +int +brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, struct sk_buff *pkt) +{ + uint incr_fix; + uint width; + int err = 0; + + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", + fn, addr, pkt->len); + + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); + if (err) + return err; incr_fix = (flags & SDIO_REQ_FIXED) ? 
SDIOH_DATA_FIX : SDIOH_DATA_INC; + err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, + fn, addr, pkt); + + return err; +} + +int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, struct sk_buff_head *pktq) +{ + uint incr_fix; + uint width; + int err = 0; + + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", + fn, addr, pktq->qlen); + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; - if (width == 4) - addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); + if (err) + return err; - status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, - fn, addr, width, nbytes, buf, pkt); + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr, + pktq); - return status; + return err; } int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt) + uint flags, u8 *buf, uint nbytes) +{ + struct sk_buff *mypkt; + int err; + + mypkt = brcmu_pkt_buf_get_skb(nbytes); + if (!mypkt) { + brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", + nbytes); + return -EIO; + } + + memcpy(mypkt->data, buf, nbytes); + err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt); + + brcmu_pkt_buf_free_skb(mypkt); + return err; + +} + +int +brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, struct sk_buff *pkt) { uint incr_fix; uint width; uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; - brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", + fn, addr, pkt->len); /* Async not implemented yet */ if (flags & SDIO_REQ_ASYNC) @@ -291,18 +375,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, - addr, width, nbytes, buf, pkt); + addr, pkt); } int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, u8 *buf, uint nbytes) { + struct sk_buff *mypkt; + bool write = rw ? SDIOH_WRITE : SDIOH_READ; + int err; + addr &= SBSDIO_SB_OFT_ADDR_MASK; addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, - (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, - addr, 4, nbytes, buf, NULL); + mypkt = brcmu_pkt_buf_get_skb(nbytes); + if (!mypkt) { + brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", + nbytes); + return -EIO; + } + + /* For a write, copy the buffer data into the packet. */ + if (write) + memcpy(mypkt->data, buf, nbytes); + + err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write, + SDIO_FUNC_1, addr, mypkt); + + /* For a read, copy the packet data back to the buffer. 
*/ + if (!err && !write) + memcpy(buf, mypkt->data, nbytes); + + brcmu_pkt_buf_free_skb(mypkt); + return err; } int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn) @@ -333,7 +438,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) sdiodev->sbwad = SI_ENUM_BASE; /* try to attach to the target device */ - sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev); + sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev); if (!sdiodev->bus) { brcmf_dbg(ERROR, "device attach failed\n"); ret = -ENODEV; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index bbaeb2d5c93..9b8c0ed833d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -31,15 +31,15 @@ #include <brcmu_utils.h> #include <brcmu_wifi.h> #include "sdio_host.h" -#include "dhd.h" #include "dhd_dbg.h" -#include "wl_cfg80211.h" +#include "dhd_bus.h" #define SDIO_VENDOR_ID_BROADCOM 0x02d0 #define DMA_ALIGN_MASK 0x03 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_FUNC1_BLOCKSIZE 64 #define SDIO_FUNC2_BLOCKSIZE 512 @@ -47,6 +47,7 @@ /* devices we support, null terminated */ static const struct sdio_device_id brcmf_sdmmc_ids[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); @@ -204,62 +205,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, return err_ret; } +/* precondition: host controller is claimed */ static int -brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc, - uint write, uint func, uint addr, - struct sk_buff *pkt) +brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo, + uint func, uint addr, struct sk_buff *pkt, uint pktlen) +{ + int err_ret = 0; + + if ((write) && (!fifo)) { + err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, + ((u8 *) (pkt->data)), pktlen); + } else if (write) { + err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, + ((u8 *) (pkt->data)), pktlen); + } else if (fifo) { + err_ret = sdio_readsb(sdiodev->func[func], + ((u8 *) (pkt->data)), addr, pktlen); + } else { + err_ret = sdio_memcpy_fromio(sdiodev->func[func], + ((u8 *) (pkt->data)), + addr, pktlen); + } + + return err_ret; +} + +/* + * This function takes a queue of packets. The packets on the queue + * are assumed to be properly aligned by the caller. 
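The reworked SDIO helpers above take only sk_buffs; the buffer-based entry points become thin adapters that allocate a temporary packet, copy in one direction, call the packet variant, and copy back on reads. The adapter shape, independent of the SDIO types (illustrative names, not the driver's API):

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical packet type and packet-based transfer primitive. */
struct pkt { uint8_t *data; size_t len; };

extern int xfer_pkt(struct pkt *p, int write);	/* stands in for *_send_pkt()/_recv_pkt() */

/* Buffer-based wrapper: stage the flat buffer through a packet, the way
 * brcmf_sdcard_send_buf()/recv_buf() do above. */
static int xfer_buf(uint8_t *buf, size_t nbytes, int write)
{
	struct pkt p = { .data = NULL, .len = nbytes };
	int err;

	p.data = malloc(nbytes);
	if (!p.data)
		return -1;			/* the driver returns -EIO here */

	if (write)
		memcpy(p.data, buf, nbytes);	/* writes: copy in first */

	err = xfer_pkt(&p, write);

	if (!err && !write)
		memcpy(buf, p.data, nbytes);	/* reads: copy the result back */

	free(p.data);
	return err;
}
```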
+ */ +int +brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, + uint write, uint func, uint addr, + struct sk_buff_head *pktq) { bool fifo = (fix_inc == SDIOH_DATA_FIX); u32 SGCount = 0; int err_ret = 0; - struct sk_buff *pnext; + struct sk_buff *pkt; brcmf_dbg(TRACE, "Enter\n"); - brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait); + brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait); if (brcmf_pm_resume_error(sdiodev)) return -EIO; /* Claim host controller */ sdio_claim_host(sdiodev->func[func]); - for (pnext = pkt; pnext; pnext = pnext->next) { - uint pkt_len = pnext->len; + + skb_queue_walk(pktq, pkt) { + uint pkt_len = pkt->len; pkt_len += 3; pkt_len &= 0xFFFFFFFC; - if ((write) && (!fifo)) { - err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, - ((u8 *) (pnext->data)), - pkt_len); - } else if (write) { - err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, - ((u8 *) (pnext->data)), - pkt_len); - } else if (fifo) { - err_ret = sdio_readsb(sdiodev->func[func], - ((u8 *) (pnext->data)), - addr, pkt_len); - } else { - err_ret = sdio_memcpy_fromio(sdiodev->func[func], - ((u8 *) (pnext->data)), - addr, pkt_len); - } - + err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func, + addr, pkt, pkt_len); if (err_ret) { brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", - write ? "TX" : "RX", pnext, SGCount, addr, + write ? "TX" : "RX", pkt, SGCount, addr, pkt_len, err_ret); } else { brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n", - write ? "TX" : "RX", pnext, SGCount, addr, + write ? "TX" : "RX", pkt, SGCount, addr, pkt_len); } - if (!fifo) addr += pkt_len; - SGCount++; + SGCount++; } /* Release host controller */ @@ -270,91 +284,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc, } /* - * This function takes a buffer or packet, and fixes everything up - * so that in the end, a DMA-able packet is created. - * - * A buffer does not have an associated packet pointer, - * and may or may not be aligned. - * A packet may consist of a single packet, or a packet chain. - * If it is a packet chain, then all the packets in the chain - * must be properly aligned. - * - * If the packet data is not aligned, then there may only be - * one packet, and in this case, it is copied to a new - * aligned packet. - * + * This function takes a single DMA-able packet. */ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, uint fix_inc, uint write, uint func, uint addr, - uint reg_width, uint buflen_u, u8 *buffer, struct sk_buff *pkt) { - int Status; - struct sk_buff *mypkt = NULL; + int status; + uint pkt_len = pkt->len; + bool fifo = (fix_inc == SDIOH_DATA_FIX); brcmf_dbg(TRACE, "Enter\n"); + if (pkt == NULL) + return -EINVAL; + brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait); if (brcmf_pm_resume_error(sdiodev)) return -EIO; - /* Case 1: we don't have a packet. */ - if (pkt == NULL) { - brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n", - write ? "TX" : "RX", buflen_u); - mypkt = brcmu_pkt_buf_get_skb(buflen_u); - if (!mypkt) { - brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", - buflen_u); - return -EIO; - } - - /* For a write, copy the buffer data into the packet. */ - if (write) - memcpy(mypkt->data, buffer, buflen_u); - - Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, - func, addr, mypkt); - - /* For a read, copy the packet data back to the buffer. 
*/ - if (!write) - memcpy(buffer, mypkt->data, buflen_u); - - brcmu_pkt_buf_free_skb(mypkt); - } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) { - /* - * Case 2: We have a packet, but it is unaligned. - * In this case, we cannot have a chain (pkt->next == NULL) - */ - brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n", - write ? "TX" : "RX", pkt->len); - mypkt = brcmu_pkt_buf_get_skb(pkt->len); - if (!mypkt) { - brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", - pkt->len); - return -EIO; - } - /* For a write, copy the buffer data into the packet. */ - if (write) - memcpy(mypkt->data, pkt->data, pkt->len); - - Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, - func, addr, mypkt); + /* Claim host controller */ + sdio_claim_host(sdiodev->func[func]); - /* For a read, copy the packet data back to the buffer. */ - if (!write) - memcpy(pkt->data, mypkt->data, mypkt->len); + pkt_len += 3; + pkt_len &= (uint)~3; - brcmu_pkt_buf_free_skb(mypkt); - } else { /* case 3: We have a packet and - it is aligned. */ - brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n", - write ? "Tx" : "Rx"); - Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, - func, addr, pkt); + status = brcmf_sdioh_request_data(sdiodev, write, fifo, func, + addr, pkt, pkt_len); + if (status) { + brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", + write ? "TX" : "RX", pkt, addr, pkt_len, status); + } else { + brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n", + write ? "TX" : "RX", pkt, addr, pkt_len); } - return Status; + /* Release host controller */ + sdio_release_host(sdiodev->func[func]); + + return status; } /* Read client card reg */ @@ -494,6 +462,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, { int ret = 0; struct brcmf_sdio_dev *sdiodev; + struct brcmf_bus *bus_if; brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "func->class=%x\n", func->class); brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor); @@ -505,17 +474,26 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, brcmf_dbg(ERROR, "card private drvdata occupied\n"); return -ENXIO; } + bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL); + if (!bus_if) + return -ENOMEM; sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL); - if (!sdiodev) + if (!sdiodev) { + kfree(bus_if); return -ENOMEM; + } sdiodev->func[0] = func->card->sdio_func[0]; sdiodev->func[1] = func; + sdiodev->bus_if = bus_if; + bus_if->bus_priv = sdiodev; + bus_if->type = SDIO_BUS; + bus_if->align = BRCMF_SDALIGN; dev_set_drvdata(&func->card->dev, sdiodev); atomic_set(&sdiodev->suspend, false); init_waitqueue_head(&sdiodev->request_byte_wait); init_waitqueue_head(&sdiodev->request_word_wait); - init_waitqueue_head(&sdiodev->request_packet_wait); + init_waitqueue_head(&sdiodev->request_chain_wait); init_waitqueue_head(&sdiodev->request_buffer_wait); } @@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, return -ENODEV; sdiodev->func[2] = func; + bus_if = sdiodev->bus_if; + sdiodev->dev = &func->dev; + dev_set_drvdata(&func->dev, bus_if); + brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n"); ret = brcmf_sdio_probe(sdiodev); } @@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, static void brcmf_ops_sdio_remove(struct sdio_func *func) { + struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(INFO, "func->class=%x\n", func->class); @@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) 
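The probe path above now allocates the bus interface object and the SDIO device state separately, cross-links them and stores them as driver data, freeing the first allocation if the second fails. A minimal shape of that two-object setup (types and names are placeholders, not the driver's structures):

```c
#include <stdlib.h>

struct bus_if { void *bus_priv; int type; };
struct sdio_dev { struct bus_if *bus_if; };

/* Allocate and cross-link the two objects, unwinding on failure the same
 * way brcmf_ops_sdio_probe() does above. */
static int probe_alloc(struct sdio_dev **out)
{
	struct bus_if *bus_if = calloc(1, sizeof(*bus_if));
	struct sdio_dev *sdiodev;

	if (!bus_if)
		return -1;			/* -ENOMEM in the driver */

	sdiodev = calloc(1, sizeof(*sdiodev));
	if (!sdiodev) {
		free(bus_if);			/* don't leak the first allocation */
		return -1;
	}

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv = sdiodev;
	*out = sdiodev;
	return 0;
}
```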
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num); if (func->num == 2) { - sdiodev = dev_get_drvdata(&func->card->dev); + bus_if = dev_get_drvdata(&func->dev); + sdiodev = bus_if->bus_priv; brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n"); brcmf_sdio_remove(sdiodev); dev_set_drvdata(&func->card->dev, NULL); + dev_set_drvdata(&func->dev, NULL); + kfree(bus_if); kfree(sdiodev); } } @@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) static int brcmf_sdio_suspend(struct device *dev) { mmc_pm_flag_t sdio_flags; - struct brcmf_sdio_dev *sdiodev; struct sdio_func *func = dev_to_sdio_func(dev); + struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); int ret = 0; brcmf_dbg(TRACE, "\n"); - sdiodev = dev_get_drvdata(&func->card->dev); - atomic_set(&sdiodev->suspend, true); sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); @@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev) static int brcmf_sdio_resume(struct device *dev) { - struct brcmf_sdio_dev *sdiodev; struct sdio_func *func = dev_to_sdio_func(dev); + struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); - sdiodev = dev_get_drvdata(&func->card->dev); brcmf_sdio_wdtmr_enable(sdiodev, true); atomic_set(&sdiodev->suspend, false); return 0; @@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = { #endif /* CONFIG_PM_SLEEP */ }; -/* bus register interface */ -int brcmf_bus_register(void) +static void __exit brcmf_sdio_exit(void) { brcmf_dbg(TRACE, "Enter\n"); - return sdio_register_driver(&brcmf_sdmmc_driver); + sdio_unregister_driver(&brcmf_sdmmc_driver); } -void brcmf_bus_unregister(void) +static int __init brcmf_sdio_init(void) { + int ret; + brcmf_dbg(TRACE, "Enter\n"); - sdio_unregister_driver(&brcmf_sdmmc_driver); + ret = sdio_register_driver(&brcmf_sdmmc_driver); + + if (ret) + brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); + + return ret; } + +module_init(brcmf_sdio_init); +module_exit(brcmf_sdio_exit); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 4645766b407..e58ea40a75b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -87,7 +87,7 @@ #define TOE_TX_CSUM_OL 0x00000001 #define TOE_RX_CSUM_OL 0x00000002 -#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */ +#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ /* size of brcmf_scan_params not including variable length array */ #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 @@ -122,8 +122,6 @@ /* For supporting multiple interfaces */ #define BRCMF_MAX_IFS 16 -#define BRCMF_DEL_IF -0xe -#define BRCMF_BAD_IF -0xf #define DOT11_BSSTYPE_ANY 2 #define DOT11_MAX_DEFAULT_KEYS 4 @@ -158,18 +156,6 @@ struct brcmf_event { struct brcmf_event_msg msg; } __packed; -struct dngl_stats { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; /* total bytes received */ - unsigned long tx_bytes; /* total bytes transmitted */ - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* packets dropped by dongle */ - unsigned long tx_dropped; /* packets dropped by dongle */ - unsigned long multicast; /* multicast packets received */ -}; - /* event codes sent by the dongle to this driver */ #define BRCMF_E_SET_SSID 0 #define BRCMF_E_JOIN 1 @@ -319,13 +305,6 @@ 
struct dngl_stats { #define BRCMF_E_LINK_ASSOC_REC 3 #define BRCMF_E_LINK_BSSCFG_DIS 4 -/* The level of bus communication with the dongle */ -enum brcmf_bus_state { - BRCMF_BUS_DOWN, /* Not ready for frame transfers */ - BRCMF_BUS_LOAD, /* Download access only (CPU reset) */ - BRCMF_BUS_DATA /* Ready for frame transfers */ -}; - /* Pattern matching filter. Specifies an offset within received packets to * start matching, the pattern to match, the size of the pattern, and a bitmask * that indicates which bits within the pattern should be matched. @@ -365,7 +344,7 @@ struct brcmf_pkt_filter_enable_le { * Applications MUST CHECK ie_offset field and length field to access IEs and * next bss_info structure in a vector (in struct brcmf_scan_results) */ -struct brcmf_bss_info { +struct brcmf_bss_info_le { __le32 version; /* version field */ __le32 length; /* byte length of data in this record, * starting at version and including IEs @@ -466,14 +445,13 @@ struct brcmf_scan_results { u32 buflen; u32 version; u32 count; - struct brcmf_bss_info bss_info[1]; + struct brcmf_bss_info_le bss_info_le[]; }; struct brcmf_scan_results_le { __le32 buflen; __le32 version; __le32 count; - struct brcmf_bss_info bss_info[1]; }; /* used for association with a specific BSSID and chanspec list */ @@ -493,10 +471,6 @@ struct brcmf_join_params { struct brcmf_assoc_params_le params_le; }; -/* size of brcmf_scan_results not including variable length array */ -#define BRCMF_SCAN_RESULTS_FIXED_SIZE \ - (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info)) - /* incremental scan results struct */ struct brcmf_iscan_results { union { @@ -511,7 +485,7 @@ struct brcmf_iscan_results { /* size of brcmf_iscan_results not including variable length array */ #define BRCMF_ISCAN_RESULTS_FIXED_SIZE \ - (BRCMF_SCAN_RESULTS_FIXED_SIZE + \ + (sizeof(struct brcmf_scan_results) + \ offsetof(struct brcmf_iscan_results, results)) struct brcmf_wsec_key { @@ -579,25 +553,19 @@ struct brcmf_dcmd { }; /* Forward decls for struct brcmf_pub (see below) */ -struct brcmf_bus; /* device bus info */ struct brcmf_proto; /* device communication protocol info */ -struct brcmf_info; /* device driver info */ struct brcmf_cfg80211_dev; /* cfg80211 device info */ /* Common structure for module and instance linkage */ struct brcmf_pub { /* Linkage ponters */ - struct brcmf_bus *bus; + struct brcmf_bus *bus_if; struct brcmf_proto *prot; - struct brcmf_info *info; struct brcmf_cfg80211_dev *config; + struct device *dev; /* fullmac dongle device pointer */ /* Internal brcmf items */ - bool up; /* Driver up/down (to OS) */ - bool txoff; /* Transmit flow-controlled */ - enum brcmf_bus_state busstate; uint hdrlen; /* Total BRCMF header length (proto + bus) */ - uint maxctl; /* Max size rxctl request from proto to bus */ uint rxsz; /* Rx buffer size bus module should use */ u8 wme_dp; /* wme discard priority */ @@ -605,48 +573,21 @@ struct brcmf_pub { bool iswl; /* Dongle-resident driver is wl */ unsigned long drv_version; /* Version of dongle-resident driver */ u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */ - struct dngl_stats dstats; /* Stats for dongle-based data */ /* Additional stats for the bus level */ - /* Data packets sent to dongle */ - unsigned long tx_packets; /* Multicast data packets sent to dongle */ unsigned long tx_multicast; - /* Errors in sending data to dongle */ - unsigned long tx_errors; - /* Control packets sent to dongle */ - unsigned long tx_ctlpkts; - /* Errors sending control frames to dongle */ - unsigned long 
tx_ctlerrs; - /* Packets sent up the network interface */ - unsigned long rx_packets; - /* Multicast packets sent up the network interface */ - unsigned long rx_multicast; - /* Errors processing rx data packets */ - unsigned long rx_errors; - /* Control frames processed from dongle */ - unsigned long rx_ctlpkts; - - /* Errors in processing rx control frames */ - unsigned long rx_ctlerrs; - /* Packets dropped locally (no memory) */ - unsigned long rx_dropped; /* Packets flushed due to unscheduled sendup thread */ unsigned long rx_flushed; /* Number of times dpc scheduled by watchdog timer */ unsigned long wd_dpc_sched; - /* Number of packets where header read-ahead was used. */ - unsigned long rx_readahead_cnt; - /* Number of tx packets we had to realloc for headroom */ - unsigned long tx_realloc; /* Number of flow control pkts recvd */ unsigned long fc_packets; /* Last error return */ int bcmerror; - uint tickcnt; /* Last error from dongle */ int dongle_error; @@ -664,6 +605,14 @@ struct brcmf_pub { u8 country_code[BRCM_CNTRY_BUF_SZ]; char eventmask[BRCMF_EVENTING_MASK_LEN]; + struct brcmf_if *iflist[BRCMF_MAX_IFS]; + + struct mutex proto_block; + + struct work_struct setmacaddr_work; + struct work_struct multicast_work; + u8 macvalue[ETH_ALEN]; + atomic_t pend_8021x_cnt; }; struct brcmf_if_event { @@ -683,67 +632,33 @@ extern const struct bcmevent_name bcmevent_names[]; extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); -/* Indication from bus module regarding presence/insertion of dongle. - * Return struct brcmf_pub pointer, used as handle to OS module in later calls. - * Returned structure should have bus and prot pointers filled in. - * bus_hdrlen specifies required headroom for bus module header. - */ -extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, - uint bus_hdrlen); extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx); extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); -/* Indication from bus module regarding removal/absence of dongle */ -extern void brcmf_detach(struct brcmf_pub *drvr); - -/* Indication from bus module to change flow-control state */ -extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on); - -extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q, - struct sk_buff *pkt, int prec); - -/* Receive frame for delivery to OS. Callee disposes of rxp. 
*/ -extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, - struct sk_buff *rxp, int numpkt); - /* Return pointer to interface name */ extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); -/* Notify tx completion */ -extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, - bool success); - /* Query dongle */ extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf, uint len); -/* OS independent layer functions */ -extern int brcmf_os_proto_block(struct brcmf_pub *drvr); -extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr); #ifdef BCMDBG extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size); #endif /* BCMDBG */ -extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name); -extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx, +extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name); +extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx, void *pktdata, struct brcmf_event_msg *, void **data_ptr); -extern void brcmf_c_init(void); - -extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, - struct net_device *ndev, char *name, u8 *mac_addr, - u32 flags, u8 bssidx); -extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx); +extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx); /* Send packet to dongle via data channel */ extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\ struct sk_buff *pkt); -extern int brcmf_bus_start(struct brcmf_pub *drvr); - extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg); extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable, int master_mode); @@ -752,25 +667,4 @@ extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, #define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */ #define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */ -/* message levels */ -#define BRCMF_ERROR_VAL 0x0001 -#define BRCMF_TRACE_VAL 0x0002 -#define BRCMF_INFO_VAL 0x0004 -#define BRCMF_DATA_VAL 0x0008 -#define BRCMF_CTL_VAL 0x0010 -#define BRCMF_TIMER_VAL 0x0020 -#define BRCMF_HDRS_VAL 0x0040 -#define BRCMF_BYTES_VAL 0x0080 -#define BRCMF_INTR_VAL 0x0100 -#define BRCMF_GLOM_VAL 0x0400 -#define BRCMF_EVENT_VAL 0x0800 -#define BRCMF_BTA_VAL 0x1000 -#define BRCMF_ISCAN_VAL 0x2000 - -/* Enter idle immediately (no timeout) */ -#define BRCMF_IDLE_IMMEDIATE (-1) -#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change - when idle */ -#define BRCMF_IDLE_INTERVAL 1 - #endif /* _BRCMF_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index a249407c9a1..ad9be2410b5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h @@ -17,41 +17,89 @@ #ifndef _BRCMF_BUS_H_ #define _BRCMF_BUS_H_ -/* Packet alignment for most efficient SDIO (can change based on platform) */ -#define BRCMF_SDALIGN (1 << 6) +/* The level of bus communication with the dongle */ +enum brcmf_bus_state { + BRCMF_BUS_DOWN, /* Not ready for frame transfers */ + BRCMF_BUS_LOAD, /* Download access only (CPU reset) */ + BRCMF_BUS_DATA /* Ready for frame transfers */ +}; -/* watchdog polling interval in ms */ -#define BRCMF_WD_POLL_MS 10 +struct dngl_stats { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + 
unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +}; + +/* interface structure between common and bus layer */ +struct brcmf_bus { + u8 type; /* bus type */ + void *bus_priv; /* pointer to bus private structure */ + void *drvr; /* pointer to driver pub structure brcmf_pub */ + enum brcmf_bus_state state; + uint maxctl; /* Max size rxctl request from proto to bus */ + bool drvr_up; /* Status flag of driver up/down */ + unsigned long tx_realloc; /* Tx packets realloced for headroom */ + struct dngl_stats dstats; /* Stats for dongle-based data */ + u8 align; /* bus alignment requirement */ + + /* interface functions pointers */ + /* Stop bus module: clear pending frames, disable data flow */ + void (*brcmf_bus_stop)(struct device *); + /* Initialize bus module: prepare for communication w/dongle */ + int (*brcmf_bus_init)(struct device *); + /* Send a data frame to the dongle. Callee disposes of txp. */ + int (*brcmf_bus_txdata)(struct device *, struct sk_buff *); + /* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. + */ + int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint); + int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint); +}; /* - * Exported from brcmf bus module (brcmf_usb, brcmf_sdio) + * interface functions from common layer */ -/* Indicate (dis)interest in finding dongles. */ -extern int brcmf_bus_register(void); -extern void brcmf_bus_unregister(void); +/* Remove any protocol-specific data header. */ +extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx, + struct sk_buff *rxp); -/* obtain linux device object providing bus function */ -extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus); +extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, + struct sk_buff *pkt, int prec); -/* Stop bus module: clear pending frames, disable data flow */ -extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus); +/* Receive frame for delivery to OS. Callee disposes of rxp. */ +extern void brcmf_rx_frame(struct device *dev, int ifidx, + struct sk_buff_head *rxlist); +static inline void brcmf_rx_packet(struct device *dev, int ifidx, + struct sk_buff *pkt) +{ + struct sk_buff_head q; -/* Initialize bus module: prepare for communication w/dongle */ -extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr); + skb_queue_head_init(&q); + skb_queue_tail(&q, pkt); + brcmf_rx_frame(dev, ifidx, &q); +} -/* Send a data frame to the dongle. Callee disposes of txp. */ -extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp); +/* Indication from bus module regarding presence/insertion of dongle. */ +extern int brcmf_attach(uint bus_hdrlen, struct device *dev); +/* Indication from bus module regarding removal/absence of dongle */ +extern void brcmf_detach(struct device *dev); -/* Send/receive a control message to/from the dongle. - * Expects caller to enforce a single outstanding transaction. 
- */ -extern int -brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen); +/* Indication from bus module to change flow-control state */ +extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on); -extern int -brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen); +/* Notify tx completion */ +extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, + bool success); -extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick); +extern int brcmf_bus_start(struct device *dev); +extern int brcmf_add_if(struct device *dev, int ifidx, + char *name, u8 *mac_addr); #endif /* _BRCMF_BUS_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c index e34c5c3d1d5..ac8d1f43788 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c @@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd { * Used on data packets to convey priority across USB. */ #define BDC_HEADER_LEN 4 -#define BDC_PROTO_VER 1 /* Protocol version */ +#define BDC_PROTO_VER 2 /* Protocol version */ #define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ #define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ #define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */ @@ -77,18 +77,19 @@ struct brcmf_proto_bdc_header { u8 flags; u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */ u8 flags2; - u8 rssi; + u8 data_offset; }; #define RETRIES 2 /* # of retries to retrieve matching dcmd response */ -#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE +#define BUS_HEADER_LEN (16+64) /* Must be atleast SDPCM_RESERVE * (amount of header tha might be added) * plus any space that might be needed - * for alignment padding. + * for bus alignment padding. */ -#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for +#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for * round off at the end of buffer + * Currently is SDIO */ struct brcmf_proto { @@ -116,8 +117,9 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr) len = CDC_MAX_MSG_SIZE; /* Send request */ - return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg, - len); + return drvr->bus_if->brcmf_bus_txctl(drvr->dev, + (unsigned char *)&prot->msg, + len); } static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) @@ -128,7 +130,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) brcmf_dbg(TRACE, "Enter\n"); do { - ret = brcmf_sdbrcm_bus_rxctl(drvr->bus, + ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev, (unsigned char *)&prot->msg, len + sizeof(struct brcmf_proto_cdc_dcmd)); if (ret < 0) @@ -280,11 +282,11 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd, struct brcmf_proto *prot = drvr->prot; int ret = -1; - if (drvr->busstate == BRCMF_BUS_DOWN) { + if (drvr->bus_if->state == BRCMF_BUS_DOWN) { brcmf_dbg(ERROR, "bus is down. 
we have nothing to do.\n"); return ret; } - brcmf_os_proto_block(drvr); + mutex_lock(&drvr->proto_block); brcmf_dbg(TRACE, "Enter\n"); @@ -338,7 +340,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd, prot->pending = false; done: - brcmf_os_proto_unblock(drvr); + mutex_unlock(&drvr->proto_block); return ret; } @@ -372,14 +374,16 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, h->priority = (pktbuf->priority & BDC_PRIORITY_MASK); h->flags2 = 0; - h->rssi = 0; + h->data_offset = 0; BDC_SET_IF_IDX(h, ifidx); } -int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx, +int brcmf_proto_hdrpull(struct device *dev, int *ifidx, struct sk_buff *pktbuf) { struct brcmf_proto_bdc_header *h; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "Enter\n"); @@ -435,7 +439,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) drvr->prot = cdc; drvr->hdrlen += BDC_HEADER_LEN; - drvr->maxctl = BRCMF_DCMD_MAXLEN + + drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN + sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN; return 0; @@ -451,18 +455,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr) drvr->prot = NULL; } -void brcmf_proto_dstats(struct brcmf_pub *drvr) -{ - /* No stats from dongle added yet, copy bus stats */ - drvr->dstats.tx_packets = drvr->tx_packets; - drvr->dstats.tx_errors = drvr->tx_errors; - drvr->dstats.rx_packets = drvr->rx_packets; - drvr->dstats.rx_errors = drvr->rx_errors; - drvr->dstats.rx_dropped = drvr->rx_dropped; - drvr->dstats.multicast = drvr->rx_multicast; - return; -} - int brcmf_proto_init(struct brcmf_pub *drvr) { int ret = 0; @@ -470,19 +462,19 @@ int brcmf_proto_init(struct brcmf_pub *drvr) brcmf_dbg(TRACE, "Enter\n"); - brcmf_os_proto_block(drvr); + mutex_lock(&drvr->proto_block); /* Get the device MAC address */ strcpy(buf, "cur_etheraddr"); ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf)); if (ret < 0) { - brcmf_os_proto_unblock(drvr); + mutex_unlock(&drvr->proto_block); return ret; } memcpy(drvr->mac, buf, ETH_ALEN); - brcmf_os_proto_unblock(drvr); + mutex_unlock(&drvr->proto_block); ret = brcmf_c_preinit_dcmds(drvr); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index 891826197f9..a51d8f5d36f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -32,8 +32,6 @@ #define PKTFILTER_BUF_SIZE 2048 #define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ -int brcmf_msg_level; - #define MSGTRACE_VERSION 1 #define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u) @@ -85,25 +83,14 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) return len; } -void brcmf_c_init(void) -{ - /* Init global variables at run-time, not as part of the declaration. - * This is required to support init/de-init of the driver. - * Initialization - * of globals as part of the declaration results in non-deterministic - * behaviour since the value of the globals may be different on the - * first time that the driver is initialized vs subsequent - * initializations. 
- */ - brcmf_msg_level = BRCMF_ERROR_VAL; -} - -bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q, +bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, int prec) { struct sk_buff *p; int eprec = -1; /* precedence to evict from */ bool discard_oldest; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; /* Fast case, precedence queue is not full and we are also not * exceeding total queue length @@ -446,7 +433,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data) #endif /* BCMDBG */ int -brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata, +brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata, struct brcmf_event_msg *event, void **data_ptr) { /* check whether packet is a BRCM event pkt */ @@ -488,19 +475,18 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata, if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) { if (ifevent->action == BRCMF_E_IF_ADD) - brcmf_add_if(drvr_priv, ifevent->ifidx, NULL, + brcmf_add_if(drvr->dev, ifevent->ifidx, event->ifname, - pvt_data->eth.h_dest, - ifevent->flags, ifevent->bssidx); + pvt_data->eth.h_dest); else - brcmf_del_if(drvr_priv, ifevent->ifidx); + brcmf_del_if(drvr, ifevent->ifidx); } else { brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n", ifevent->ifidx, event->ifname); } /* send up the if event: btamp user needs it */ - *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname); + *ifidx = brcmf_ifname2idx(drvr, event->ifname); break; /* These are what external supplicant/authenticator wants */ @@ -512,7 +498,7 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata, default: /* Fall through: this should get _everything_ */ - *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname); + *ifidx = brcmf_ifname2idx(drvr, event->ifname); brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n", type, flags, status); @@ -812,7 +798,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) "event_msgs" + '\0' + bitvec */ uint up = 0; char buf[128], *ptr; - u32 dongle_align = BRCMF_SDALIGN; + u32 dongle_align = drvr->bus_if->align; u32 glom = 0; u32 roaming = 1; uint bcn_timeout = 3; @@ -820,7 +806,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) int scan_unassoc_time = 40; int i; - brcmf_os_proto_block(drvr); + mutex_lock(&drvr->proto_block); /* Set Country code */ if (drvr->country_code[0] != 0) { @@ -889,7 +875,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) 0, true); } - brcmf_os_proto_unblock(drvr); + mutex_unlock(&drvr->proto_block); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index 7467922f053..bb26ee36bc6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h @@ -17,6 +17,21 @@ #ifndef _BRCMF_DBG_H_ #define _BRCMF_DBG_H_ +/* message levels */ +#define BRCMF_ERROR_VAL 0x0001 +#define BRCMF_TRACE_VAL 0x0002 +#define BRCMF_INFO_VAL 0x0004 +#define BRCMF_DATA_VAL 0x0008 +#define BRCMF_CTL_VAL 0x0010 +#define BRCMF_TIMER_VAL 0x0020 +#define BRCMF_HDRS_VAL 0x0040 +#define BRCMF_BYTES_VAL 0x0080 +#define BRCMF_INTR_VAL 0x0100 +#define BRCMF_GLOM_VAL 0x0400 +#define BRCMF_EVENT_VAL 0x0800 +#define BRCMF_BTA_VAL 0x1000 +#define BRCMF_ISCAN_VAL 0x2000 + #if defined(BCMDBG) #define brcmf_dbg(level, fmt, ...) 
\ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 4acbac5a74c..eb9eb766ac2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -43,7 +43,6 @@ #include "dhd_proto.h" #include "dhd_dbg.h" #include "wl_cfg80211.h" -#include "bcmchip.h" MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver."); @@ -53,48 +52,19 @@ MODULE_LICENSE("Dual BSD/GPL"); /* Interface control information */ struct brcmf_if { - struct brcmf_info *info; /* back pointer to brcmf_info */ + struct brcmf_pub *drvr; /* back pointer to brcmf_pub */ /* OS/stack specifics */ struct net_device *ndev; struct net_device_stats stats; int idx; /* iface idx in dongle */ - int state; /* interface state */ u8 mac_addr[ETH_ALEN]; /* assigned MAC address */ }; -/* Local private structure (extension of pub) */ -struct brcmf_info { - struct brcmf_pub pub; - - /* OS/stack specifics */ - struct brcmf_if *iflist[BRCMF_MAX_IFS]; - - struct mutex proto_block; - - struct work_struct setmacaddr_work; - struct work_struct multicast_work; - u8 macvalue[ETH_ALEN]; - atomic_t pend_8021x_cnt; -}; - /* Error bits */ +int brcmf_msg_level = BRCMF_ERROR_VAL; module_param(brcmf_msg_level, int, 0); - -static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev) -{ - int i = 0; - - while (i < BRCMF_MAX_IFS) { - if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev) - return i; - i++; - } - - return BRCMF_BAD_IF; -} - -int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name) +int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name) { int i = BRCMF_MAX_IFS; struct brcmf_if *ifp; @@ -103,7 +73,7 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name) return 0; while (--i > 0) { - ifp = drvr_priv->iflist[i]; + ifp = drvr->iflist[i]; if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ)) break; } @@ -115,20 +85,18 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name) char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) { - struct brcmf_info *drvr_priv = drvr->info; - if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) { brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx); return "<if_bad>"; } - if (drvr_priv->iflist[ifidx] == NULL) { + if (drvr->iflist[ifidx] == NULL) { brcmf_dbg(ERROR, "null i/f %d\n", ifidx); return "<if_null>"; } - if (drvr_priv->iflist[ifidx]->ndev) - return drvr_priv->iflist[ifidx]->ndev->name; + if (drvr->iflist[ifidx]->ndev) + return drvr->iflist[ifidx]->ndev->name; return "<if_none>"; } @@ -146,10 +114,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) uint buflen; int ret; - struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info, + struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, multicast_work); - ndev = drvr_priv->iflist[0]->ndev; + ndev = drvr->iflist[0]->ndev; cnt = netdev_mc_count(ndev); /* Determine initial value of allmulti flag */ @@ -183,10 +151,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) dcmd.len = buflen; dcmd.set = true; - ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n", - brcmf_ifname(&drvr_priv->pub, 0), cnt); + brcmf_ifname(drvr, 0), cnt); dcmd_value = cnt ? 
true : dcmd_value; } @@ -208,7 +176,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work) ("allmulti", (void *)&dcmd_le_value, sizeof(dcmd_le_value), buf, buflen)) { brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n", - brcmf_ifname(&drvr_priv->pub, 0), + brcmf_ifname(drvr, 0), (int)sizeof(dcmd_value), buflen); kfree(buf); return; @@ -220,10 +188,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) dcmd.len = buflen; dcmd.set = true; - ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: set allmulti %d failed\n", - brcmf_ifname(&drvr_priv->pub, 0), + brcmf_ifname(drvr, 0), le32_to_cpu(dcmd_le_value)); } @@ -241,10 +209,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) dcmd.len = sizeof(dcmd_le_value); dcmd.set = true; - ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: set promisc %d failed\n", - brcmf_ifname(&drvr_priv->pub, 0), + brcmf_ifname(drvr, 0), le32_to_cpu(dcmd_le_value)); } } @@ -256,14 +224,14 @@ _brcmf_set_mac_address(struct work_struct *work) struct brcmf_dcmd dcmd; int ret; - struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info, + struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, setmacaddr_work); brcmf_dbg(TRACE, "enter\n"); - if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue, + if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue, ETH_ALEN, buf, 32)) { brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n", - brcmf_ifname(&drvr_priv->pub, 0)); + brcmf_ifname(drvr, 0)); return; } memset(&dcmd, 0, sizeof(dcmd)); @@ -272,52 +240,40 @@ _brcmf_set_mac_address(struct work_struct *work) dcmd.len = 32; dcmd.set = true; - ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); if (ret < 0) brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n", - brcmf_ifname(&drvr_priv->pub, 0)); + brcmf_ifname(drvr, 0)); else - memcpy(drvr_priv->iflist[0]->ndev->dev_addr, - drvr_priv->macvalue, ETH_ALEN); + memcpy(drvr->iflist[0]->ndev->dev_addr, + drvr->macvalue, ETH_ALEN); return; } static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; struct sockaddr *sa = (struct sockaddr *)addr; - int ifidx; - - ifidx = brcmf_net2idx(drvr_priv, ndev); - if (ifidx == BRCMF_BAD_IF) - return -1; - memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN); - schedule_work(&drvr_priv->setmacaddr_work); + memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN); + schedule_work(&drvr->setmacaddr_work); return 0; } static void brcmf_netdev_set_multicast_list(struct net_device *ndev) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); - int ifidx; - - ifidx = brcmf_net2idx(drvr_priv, ndev); - if (ifidx == BRCMF_BAD_IF) - return; + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; - schedule_work(&drvr_priv->multicast_work); + schedule_work(&drvr->multicast_work); } int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf) { - struct brcmf_info *drvr_priv = drvr->info; - /* Reject if down */ - if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN)) + if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == 
BRCMF_BUS_DOWN)) return -ENODEV; /* Update multicast statistic */ @@ -328,122 +284,118 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf) if (is_multicast_ether_addr(eh->h_dest)) drvr->tx_multicast++; if (ntohs(eh->h_proto) == ETH_P_PAE) - atomic_inc(&drvr_priv->pend_8021x_cnt); + atomic_inc(&drvr->pend_8021x_cnt); } /* If the protocol uses a data header, apply it */ brcmf_proto_hdrpush(drvr, ifidx, pktbuf); /* Use bus module to send data frame */ - return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf); + return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf); } static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int ret; - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); - int ifidx; + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; brcmf_dbg(TRACE, "Enter\n"); /* Reject if down */ - if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) { - brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n", - drvr_priv->pub.up, drvr_priv->pub.busstate); + if (!drvr->bus_if->drvr_up || + (drvr->bus_if->state == BRCMF_BUS_DOWN)) { + brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n", + drvr->bus_if->drvr_up, + drvr->bus_if->state); netif_stop_queue(ndev); return -ENODEV; } - ifidx = brcmf_net2idx(drvr_priv, ndev); - if (ifidx == BRCMF_BAD_IF) { - brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx); + if (!drvr->iflist[ifp->idx]) { + brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx); netif_stop_queue(ndev); return -ENODEV; } /* Make sure there's enough room for any header */ - if (skb_headroom(skb) < drvr_priv->pub.hdrlen) { + if (skb_headroom(skb) < drvr->hdrlen) { struct sk_buff *skb2; brcmf_dbg(INFO, "%s: insufficient headroom\n", - brcmf_ifname(&drvr_priv->pub, ifidx)); - drvr_priv->pub.tx_realloc++; - skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen); + brcmf_ifname(drvr, ifp->idx)); + drvr->bus_if->tx_realloc++; + skb2 = skb_realloc_headroom(skb, drvr->hdrlen); dev_kfree_skb(skb); skb = skb2; if (skb == NULL) { brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n", - brcmf_ifname(&drvr_priv->pub, ifidx)); + brcmf_ifname(drvr, ifp->idx)); ret = -ENOMEM; goto done; } } - ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb); + ret = brcmf_sendpkt(drvr, ifp->idx, skb); done: if (ret) - drvr_priv->pub.dstats.tx_dropped++; + drvr->bus_if->dstats.tx_dropped++; else - drvr_priv->pub.tx_packets++; + drvr->bus_if->dstats.tx_packets++; /* Return ok: we always eat the packet */ return 0; } -void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state) +void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state) { struct net_device *ndev; - struct brcmf_info *drvr_priv = drvr->info; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "Enter\n"); - drvr->txoff = state; - ndev = drvr_priv->iflist[ifidx]->ndev; + ndev = drvr->iflist[ifidx]->ndev; if (state == ON) netif_stop_queue(ndev); else netif_wake_queue(ndev); } -static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx, +static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata, struct brcmf_event_msg *event, void **data) { int bcmerror = 0; - bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data); + bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data); if (bcmerror != 0) return bcmerror; - if (drvr_priv->iflist[*ifidx]->ndev) - brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev, + if 
(drvr->iflist[*ifidx]->ndev) + brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev, event, *data); return bcmerror; } -void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb, - int numpkt) +void brcmf_rx_frame(struct device *dev, int ifidx, + struct sk_buff_head *skb_list) { - struct brcmf_info *drvr_priv = drvr->info; unsigned char *eth; uint len; void *data; - struct sk_buff *pnext, *save_pktbuf; - int i; + struct sk_buff *skb, *pnext; struct brcmf_if *ifp; struct brcmf_event_msg event; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "Enter\n"); - save_pktbuf = skb; - - for (i = 0; skb && i < numpkt; i++, skb = pnext) { - - pnext = skb->next; - skb->next = NULL; + skb_queue_walk_safe(skb_list, skb, pnext) { + skb_unlink(skb, skb_list); /* Get the protocol, maintain skb around eth_type_trans() * The main reason for this hack is for the limitation of @@ -460,15 +412,21 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb, eth = skb->data; len = skb->len; - ifp = drvr_priv->iflist[ifidx]; + ifp = drvr->iflist[ifidx]; if (ifp == NULL) - ifp = drvr_priv->iflist[0]; + ifp = drvr->iflist[0]; + + if (!ifp || !ifp->ndev || + ifp->ndev->reg_state != NETREG_REGISTERED) { + brcmu_pkt_buf_free_skb(skb); + continue; + } skb->dev = ifp->ndev; skb->protocol = eth_type_trans(skb, skb->dev); if (skb->pkt_type == PACKET_MULTICAST) - drvr_priv->pub.rx_multicast++; + bus_if->dstats.multicast++; skb->data = eth; skb->len = len; @@ -478,19 +436,17 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb, /* Process special event packets and then discard them */ if (ntohs(skb->protocol) == ETH_P_LINK_CTL) - brcmf_host_event(drvr_priv, &ifidx, + brcmf_host_event(drvr, &ifidx, skb_mac_header(skb), &event, &data); - if (drvr_priv->iflist[ifidx] && - !drvr_priv->iflist[ifidx]->state) - ifp = drvr_priv->iflist[ifidx]; - - if (ifp->ndev) + if (drvr->iflist[ifidx]) { + ifp = drvr->iflist[ifidx]; ifp->ndev->last_rx = jiffies; + } - drvr->dstats.rx_bytes += skb->len; - drvr->rx_packets++; /* Local count */ + bus_if->dstats.rx_bytes += skb->len; + bus_if->dstats.rx_packets++; /* Local count */ if (in_interrupt()) netif_rx(skb); @@ -505,59 +461,48 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb, } } -void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success) +void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) { uint ifidx; - struct brcmf_info *drvr_priv = drvr->info; struct ethhdr *eh; u16 type; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; - brcmf_proto_hdrpull(drvr, &ifidx, txp); + brcmf_proto_hdrpull(dev, &ifidx, txp); eh = (struct ethhdr *)(txp->data); type = ntohs(eh->h_proto); if (type == ETH_P_PAE) - atomic_dec(&drvr_priv->pend_8021x_cnt); + atomic_dec(&drvr->pend_8021x_cnt); } static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); - struct brcmf_if *ifp; - int ifidx; + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_bus *bus_if = ifp->drvr->bus_if; brcmf_dbg(TRACE, "Enter\n"); - ifidx = brcmf_net2idx(drvr_priv, ndev); - if (ifidx == BRCMF_BAD_IF) - return NULL; - - ifp = drvr_priv->iflist[ifidx]; - - if (drvr_priv->pub.up) - /* Use the protocol to get dongle stats */ - brcmf_proto_dstats(&drvr_priv->pub); - /* Copy dongle stats to net device stats */ - 
ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets; - ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets; - ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes; - ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes; - ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors; - ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors; - ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped; - ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped; - ifp->stats.multicast = drvr_priv->pub.dstats.multicast; + ifp->stats.rx_packets = bus_if->dstats.rx_packets; + ifp->stats.tx_packets = bus_if->dstats.tx_packets; + ifp->stats.rx_bytes = bus_if->dstats.rx_bytes; + ifp->stats.tx_bytes = bus_if->dstats.tx_bytes; + ifp->stats.rx_errors = bus_if->dstats.rx_errors; + ifp->stats.tx_errors = bus_if->dstats.tx_errors; + ifp->stats.rx_dropped = bus_if->dstats.rx_dropped; + ifp->stats.tx_dropped = bus_if->dstats.tx_dropped; + ifp->stats.multicast = bus_if->dstats.multicast; return &ifp->stats; } /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ -static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol) +static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol) { struct brcmf_dcmd dcmd; __le32 toe_le; @@ -572,17 +517,17 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol) dcmd.set = false; strcpy(buf, "toe_ol"); - ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); if (ret < 0) { /* Check for older dongle image that doesn't support toe_ol */ if (ret == -EIO) { brcmf_dbg(ERROR, "%s: toe not supported by device\n", - brcmf_ifname(&drvr_priv->pub, ifidx)); + brcmf_ifname(drvr, ifidx)); return -EOPNOTSUPP; } brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n", - brcmf_ifname(&drvr_priv->pub, ifidx), ret); + brcmf_ifname(drvr, ifidx), ret); return ret; } @@ -593,7 +538,7 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol) /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ -static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol) +static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol) { struct brcmf_dcmd dcmd; char buf[32]; @@ -611,10 +556,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol) strcpy(buf, "toe_ol"); memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32)); - ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n", - brcmf_ifname(&drvr_priv->pub, ifidx), ret); + brcmf_ifname(drvr, ifidx), ret); return ret; } @@ -624,10 +569,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol) strcpy(buf, "toe"); memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32)); - ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n", - brcmf_ifname(&drvr_priv->pub, ifidx), ret); + brcmf_ifname(drvr, ifidx), ret); return ret; } @@ -637,21 +582,19 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol) static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); + struct brcmf_if *ifp = 
netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; sprintf(info->driver, KBUILD_MODNAME); - sprintf(info->version, "%lu", drvr_priv->pub.drv_version); - sprintf(info->fw_version, "%s", BCM4329_FW_NAME); - sprintf(info->bus_info, "%s", - dev_name(brcmf_bus_get_device(drvr_priv->pub.bus))); + sprintf(info->version, "%lu", drvr->drv_version); + sprintf(info->bus_info, "%s", dev_name(drvr->dev)); } static struct ethtool_ops brcmf_ethtool_ops = { .get_drvinfo = brcmf_ethtool_get_drvinfo }; -static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) +static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) { struct ethtool_drvinfo info; char drvname[sizeof(info.driver)]; @@ -685,18 +628,18 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) } /* otherwise, require dongle to be up */ - else if (!drvr_priv->pub.up) { + else if (!drvr->bus_if->drvr_up) { brcmf_dbg(ERROR, "dongle is not up\n"); return -ENODEV; } /* finally, report dongle driver type */ - else if (drvr_priv->pub.iswl) + else if (drvr->iswl) sprintf(info.driver, "wl"); else sprintf(info.driver, "xx"); - sprintf(info.version, "%lu", drvr_priv->pub.drv_version); + sprintf(info.version, "%lu", drvr->drv_version); if (copy_to_user(uaddr, &info, sizeof(info))) return -EFAULT; brcmf_dbg(CTL, "given %*s, returning %s\n", @@ -706,7 +649,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) /* Get toe offload components from dongle */ case ETHTOOL_GRXCSUM: case ETHTOOL_GTXCSUM: - ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt); + ret = brcmf_toe_get(drvr, 0, &toe_cmpnt); if (ret < 0) return ret; @@ -727,7 +670,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) return -EFAULT; /* Read the current settings, update and write back */ - ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt); + ret = brcmf_toe_get(drvr, 0, &toe_cmpnt); if (ret < 0) return ret; @@ -739,17 +682,17 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) else toe_cmpnt &= ~csum_dir; - ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt); + ret = brcmf_toe_set(drvr, 0, toe_cmpnt); if (ret < 0) return ret; /* If setting TX checksum mode, tell Linux the new mode */ if (cmd == ETHTOOL_STXCSUM) { if (edata.data) - drvr_priv->iflist[0]->ndev->features |= + drvr->iflist[0]->ndev->features |= NETIF_F_IP_CSUM; else - drvr_priv->iflist[0]->ndev->features &= + drvr->iflist[0]->ndev->features &= ~NETIF_F_IP_CSUM; } @@ -765,18 +708,16 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr, int cmd) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); - int ifidx; + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; - ifidx = brcmf_net2idx(drvr_priv, ndev); - brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd); + brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd); - if (ifidx == BRCMF_BAD_IF) + if (!drvr->iflist[ifp->idx]) return -1; if (cmd == SIOCETHTOOL) - return brcmf_ethtool(drvr_priv, ifr->ifr_data); + return brcmf_ethtool(drvr, ifr->ifr_data); return -EOPNOTSUPP; } @@ -788,28 +729,25 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len) s32 err = 0; int buflen = 0; bool is_set_key_cmd; - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); - int ifidx; + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; memset(&dcmd, 0, 
sizeof(dcmd)); dcmd.cmd = cmd; dcmd.buf = arg; dcmd.len = len; - ifidx = brcmf_net2idx(drvr_priv, ndev); - if (dcmd.buf != NULL) buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN); /* send to dongle (must be up, and wl) */ - if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) { + if ((drvr->bus_if->state != BRCMF_BUS_DATA)) { brcmf_dbg(ERROR, "DONGLE_DOWN\n"); err = -EIO; goto done; } - if (!drvr_priv->pub.iswl) { + if (!drvr->iswl) { err = -EIO; goto done; } @@ -826,7 +764,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len) if (is_set_key_cmd) brcmf_netdev_wait_pend8021x(ndev); - err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen); + err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen); done: if (err > 0) @@ -837,15 +775,16 @@ done: static int brcmf_netdev_stop(struct net_device *ndev) { - struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; brcmf_dbg(TRACE, "Enter\n"); brcmf_cfg80211_down(drvr->config); - if (drvr->up == 0) + if (drvr->bus_if->drvr_up == 0) return 0; /* Set state and stop OS transmissions */ - drvr->up = 0; + drvr->bus_if->drvr_up = false; netif_stop_queue(ndev); return 0; @@ -853,39 +792,37 @@ static int brcmf_netdev_stop(struct net_device *ndev) static int brcmf_netdev_open(struct net_device *ndev) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **) - netdev_priv(ndev); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; u32 toe_ol; - int ifidx = brcmf_net2idx(drvr_priv, ndev); s32 ret = 0; - brcmf_dbg(TRACE, "ifidx %d\n", ifidx); - - if (ifidx == 0) { /* do it only for primary eth0 */ + brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); + if (ifp->idx == 0) { /* do it only for primary eth0 */ /* try to bring up bus */ - ret = brcmf_bus_start(&drvr_priv->pub); + ret = brcmf_bus_start(drvr->dev); if (ret != 0) { brcmf_dbg(ERROR, "failed with code %d\n", ret); return -1; } - atomic_set(&drvr_priv->pend_8021x_cnt, 0); + atomic_set(&drvr->pend_8021x_cnt, 0); - memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN); + memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN); /* Get current TOE mode from dongle */ - if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0 + if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) - drvr_priv->iflist[ifidx]->ndev->features |= + drvr->iflist[ifp->idx]->ndev->features |= NETIF_F_IP_CSUM; else - drvr_priv->iflist[ifidx]->ndev->features &= + drvr->iflist[ifp->idx]->ndev->features &= ~NETIF_F_IP_CSUM; } /* Allow transmit calls */ netif_start_queue(ndev); - drvr_priv->pub.up = 1; - if (brcmf_cfg80211_up(drvr_priv->pub.config)) { + drvr->bus_if->drvr_up = true; + if (brcmf_cfg80211_up(drvr->config)) { brcmf_dbg(ERROR, "failed to bring up cfg80211\n"); return -1; } @@ -893,193 +830,155 @@ static int brcmf_netdev_open(struct net_device *ndev) return ret; } +static const struct net_device_ops brcmf_netdev_ops_pri = { + .ndo_open = brcmf_netdev_open, + .ndo_stop = brcmf_netdev_stop, + .ndo_get_stats = brcmf_netdev_get_stats, + .ndo_do_ioctl = brcmf_netdev_ioctl_entry, + .ndo_start_xmit = brcmf_netdev_start_xmit, + .ndo_set_mac_address = brcmf_netdev_set_mac_address, + .ndo_set_rx_mode = brcmf_netdev_set_multicast_list +}; + int -brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev, - char *name, u8 *mac_addr, u32 flags, u8 bssidx) +brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) { struct brcmf_if *ifp; - int ret = 0, err = 0; + 
struct net_device *ndev; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; - brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev); + brcmf_dbg(TRACE, "idx %d\n", ifidx); - ifp = drvr_priv->iflist[ifidx]; - if (!ifp) { - ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC); - if (!ifp) - return -ENOMEM; + ifp = drvr->iflist[ifidx]; + /* + * Delete the existing interface before overwriting it + * in case we missed the BRCMF_E_IF_DEL event. + */ + if (ifp) { + brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n", + ifp->ndev->name); + netif_stop_queue(ifp->ndev); + unregister_netdev(ifp->ndev); + free_netdev(ifp->ndev); + drvr->iflist[ifidx] = NULL; } - memset(ifp, 0, sizeof(struct brcmf_if)); - ifp->info = drvr_priv; - drvr_priv->iflist[ifidx] = ifp; + /* Allocate netdev, including space for private structure */ + ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup); + if (!ndev) { + brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); + return -ENOMEM; + } + + ifp = netdev_priv(ndev); + ifp->ndev = ndev; + ifp->drvr = drvr; + drvr->iflist[ifidx] = ifp; + ifp->idx = ifidx; if (mac_addr != NULL) memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN); - if (ndev == NULL) { - ifp->state = BRCMF_E_IF_ADD; - ifp->idx = ifidx; - /* - * Delete the existing interface before overwriting it - * in case we missed the BRCMF_E_IF_DEL event. - */ - if (ifp->ndev != NULL) { - brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n", - ifp->ndev->name); - netif_stop_queue(ifp->ndev); - unregister_netdev(ifp->ndev); - free_netdev(ifp->ndev); - } - - /* Allocate netdev, including space for private structure */ - ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", - ether_setup); - if (!ifp->ndev) { - brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); - ret = -ENOMEM; - } - - if (ret == 0) { - memcpy(netdev_priv(ifp->ndev), &drvr_priv, - sizeof(drvr_priv)); - err = brcmf_net_attach(&drvr_priv->pub, ifp->idx); - if (err != 0) { - brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n", - err); - ret = -EOPNOTSUPP; - } else { - brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n", - current->pid, ifp->ndev->name); - ifp->state = 0; - } - } - - if (ret < 0) { - if (ifp->ndev) - free_netdev(ifp->ndev); + if (brcmf_net_attach(drvr, ifp->idx)) { + brcmf_dbg(ERROR, "brcmf_net_attach failed"); + free_netdev(ifp->ndev); + drvr->iflist[ifidx] = NULL; + return -EOPNOTSUPP; + } - drvr_priv->iflist[ifp->idx] = NULL; - kfree(ifp); - } - } else - ifp->ndev = ndev; + brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n", + current->pid, ifp->ndev->name); return 0; } -void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx) +void brcmf_del_if(struct brcmf_pub *drvr, int ifidx) { struct brcmf_if *ifp; brcmf_dbg(TRACE, "idx %d\n", ifidx); - ifp = drvr_priv->iflist[ifidx]; + ifp = drvr->iflist[ifidx]; if (!ifp) { brcmf_dbg(ERROR, "Null interface\n"); return; } + if (ifp->ndev) { + if (ifidx == 0) { + if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { + rtnl_lock(); + brcmf_netdev_stop(ifp->ndev); + rtnl_unlock(); + } + } else { + netif_stop_queue(ifp->ndev); + } - ifp->state = BRCMF_E_IF_DEL; - ifp->idx = ifidx; - if (ifp->ndev != NULL) { - netif_stop_queue(ifp->ndev); unregister_netdev(ifp->ndev); + drvr->iflist[ifidx] = NULL; + if (ifidx == 0) + brcmf_cfg80211_detach(drvr->config); free_netdev(ifp->ndev); - drvr_priv->iflist[ifidx] = NULL; - kfree(ifp); } } -struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen) +int 
brcmf_attach(uint bus_hdrlen, struct device *dev) { - struct brcmf_info *drvr_priv = NULL; - struct net_device *ndev; + struct brcmf_pub *drvr = NULL; + int ret = 0; brcmf_dbg(TRACE, "Enter\n"); - /* Allocate netdev, including space for private structure */ - ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup); - if (!ndev) { - brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); - goto fail; - } - /* Allocate primary brcmf_info */ - drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC); - if (!drvr_priv) - goto fail; - - /* - * Save the brcmf_info into the priv - */ - memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv)); - - if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) == - BRCMF_BAD_IF) - goto fail; - - ndev->netdev_ops = NULL; - mutex_init(&drvr_priv->proto_block); + drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC); + if (!drvr) + return -ENOMEM; - /* Link to info module */ - drvr_priv->pub.info = drvr_priv; + mutex_init(&drvr->proto_block); /* Link to bus module */ - drvr_priv->pub.bus = bus; - drvr_priv->pub.hdrlen = bus_hdrlen; + drvr->hdrlen = bus_hdrlen; + drvr->bus_if = dev_get_drvdata(dev); + drvr->bus_if->drvr = drvr; + drvr->dev = dev; /* Attach and link in the protocol */ - if (brcmf_proto_attach(&drvr_priv->pub) != 0) { + ret = brcmf_proto_attach(drvr); + if (ret != 0) { brcmf_dbg(ERROR, "brcmf_prot_attach failed\n"); goto fail; } - /* Attach and link in the cfg80211 */ - drvr_priv->pub.config = - brcmf_cfg80211_attach(ndev, - brcmf_bus_get_device(bus), - &drvr_priv->pub); - if (drvr_priv->pub.config == NULL) { - brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); - goto fail; - } - - INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address); - INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list); + INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address); + INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list); - /* - * Save the brcmf_info into the priv - */ - memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv)); - - return &drvr_priv->pub; + return ret; fail: - if (ndev) - free_netdev(ndev); - if (drvr_priv) - brcmf_detach(&drvr_priv->pub); + brcmf_detach(dev); - return NULL; + return ret; } -int brcmf_bus_start(struct brcmf_pub *drvr) +int brcmf_bus_start(struct device *dev) { int ret = -1; - struct brcmf_info *drvr_priv = drvr->info; /* Room for "event_msgs" + '\0' + bitvec */ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "\n"); /* Bring up the bus */ - ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub); + ret = bus_if->brcmf_bus_init(dev); if (ret != 0) { brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret); return ret; } /* If bus is not ready, can't come up */ - if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) { + if (bus_if->state != BRCMF_BUS_DATA) { brcmf_dbg(ERROR, "failed bus is not ready\n"); return -ENODEV; } @@ -1116,33 +1015,22 @@ int brcmf_bus_start(struct brcmf_pub *drvr) drvr->pktfilter[0] = "100 0 0 0 0x01 0x00"; /* Bus is ready, do any protocol initialization */ - ret = brcmf_proto_init(&drvr_priv->pub); + ret = brcmf_proto_init(drvr); if (ret < 0) return ret; return 0; } -static struct net_device_ops brcmf_netdev_ops_pri = { - .ndo_open = brcmf_netdev_open, - .ndo_stop = brcmf_netdev_stop, - .ndo_get_stats = brcmf_netdev_get_stats, - .ndo_do_ioctl = brcmf_netdev_ioctl_entry, - .ndo_start_xmit = brcmf_netdev_start_xmit, - .ndo_set_mac_address = brcmf_netdev_set_mac_address, - .ndo_set_rx_mode = 
brcmf_netdev_set_multicast_list -}; - int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) { - struct brcmf_info *drvr_priv = drvr->info; struct net_device *ndev; u8 temp_addr[ETH_ALEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33}; brcmf_dbg(TRACE, "ifidx %d\n", ifidx); - ndev = drvr_priv->iflist[ifidx]->ndev; + ndev = drvr->iflist[ifidx]->ndev; ndev->netdev_ops = &brcmf_netdev_ops_pri; /* @@ -1150,7 +1038,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) */ if (ifidx != 0) { /* for virtual interfaces use the primary MAC */ - memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN); + memcpy(temp_addr, drvr->mac, ETH_ALEN); } @@ -1161,14 +1049,23 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) - Locally Administered address */ } - ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen; + ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; - drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len + - drvr_priv->pub.hdrlen; + drvr->rxsz = ndev->mtu + ndev->hard_header_len + + drvr->hdrlen; memcpy(ndev->dev_addr, temp_addr, ETH_ALEN); + /* attach to cfg80211 for primary interface */ + if (!ifidx) { + drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr); + if (drvr->config == NULL) { + brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); + goto fail; + } + } + if (register_netdev(ndev) != 0) { brcmf_dbg(ERROR, "couldn't register the net device\n"); goto fail; @@ -1185,127 +1082,57 @@ fail: static void brcmf_bus_detach(struct brcmf_pub *drvr) { - struct brcmf_info *drvr_priv; - - brcmf_dbg(TRACE, "Enter\n"); - - if (drvr) { - drvr_priv = drvr->info; - if (drvr_priv) { - /* Stop the protocol module */ - brcmf_proto_stop(&drvr_priv->pub); - - /* Stop the bus module */ - brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus); - } - } -} - -void brcmf_detach(struct brcmf_pub *drvr) -{ - struct brcmf_info *drvr_priv; - brcmf_dbg(TRACE, "Enter\n"); if (drvr) { - drvr_priv = drvr->info; - if (drvr_priv) { - struct brcmf_if *ifp; - int i; - - for (i = 1; i < BRCMF_MAX_IFS; i++) - if (drvr_priv->iflist[i]) - brcmf_del_if(drvr_priv, i); - - ifp = drvr_priv->iflist[0]; - if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { - rtnl_lock(); - brcmf_netdev_stop(ifp->ndev); - rtnl_unlock(); - unregister_netdev(ifp->ndev); - } - - cancel_work_sync(&drvr_priv->setmacaddr_work); - cancel_work_sync(&drvr_priv->multicast_work); - - brcmf_bus_detach(drvr); - - if (drvr->prot) - brcmf_proto_detach(drvr); - - brcmf_cfg80211_detach(drvr->config); + /* Stop the protocol module */ + brcmf_proto_stop(drvr); - free_netdev(ifp->ndev); - kfree(ifp); - kfree(drvr_priv); - } + /* Stop the bus module */ + drvr->bus_if->brcmf_bus_stop(drvr->dev); } } -static void __exit brcmf_module_cleanup(void) -{ - brcmf_dbg(TRACE, "Enter\n"); - - brcmf_bus_unregister(); -} - -static int __init brcmf_module_init(void) +void brcmf_detach(struct device *dev) { - int error; + int i; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "Enter\n"); - error = brcmf_bus_register(); - - if (error) { - brcmf_dbg(ERROR, "brcmf_bus_register failed\n"); - goto failed; - } - return 0; - -failed: - return -EINVAL; -} -module_init(brcmf_module_init); -module_exit(brcmf_module_cleanup); + /* make sure primary interface removed last */ + for (i = BRCMF_MAX_IFS-1; i > -1; i--) + if (drvr->iflist[i]) + brcmf_del_if(drvr, i); -int brcmf_os_proto_block(struct brcmf_pub *drvr) -{ - struct brcmf_info *drvr_priv = drvr->info; - - if (drvr_priv) { - 
mutex_lock(&drvr_priv->proto_block); - return 1; - } - return 0; -} + cancel_work_sync(&drvr->setmacaddr_work); + cancel_work_sync(&drvr->multicast_work); -int brcmf_os_proto_unblock(struct brcmf_pub *drvr) -{ - struct brcmf_info *drvr_priv = drvr->info; + brcmf_bus_detach(drvr); - if (drvr_priv) { - mutex_unlock(&drvr_priv->proto_block); - return 1; - } + if (drvr->prot) + brcmf_proto_detach(drvr); - return 0; + bus_if->drvr = NULL; + kfree(drvr); } -static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv) +static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr) { - return atomic_read(&drvr_priv->pend_8021x_cnt); + return atomic_read(&drvr->pend_8021x_cnt); } #define MAX_WAIT_FOR_8021X_TX 10 int brcmf_netdev_wait_pend8021x(struct net_device *ndev) { - struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; int timeout = 10 * HZ / 1000; int ntimes = MAX_WAIT_FOR_8021X_TX; - int pend = brcmf_get_pend_8021x_cnt(drvr_priv); + int pend = brcmf_get_pend_8021x_cnt(drvr); while (ntimes && pend) { if (pend) { @@ -1314,7 +1141,7 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev) set_current_state(TASK_RUNNING); ntimes--; } - pend = brcmf_get_pend_8021x_cnt(drvr_priv); + pend = brcmf_get_pend_8021x_cnt(drvr); } return pend; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h index 4ee1ea846f6..6bc4425a8b0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h @@ -41,17 +41,10 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr); extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, struct sk_buff *txp); -/* Remove any protocol-specific data header. 
*/ -extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx, - struct sk_buff *rxp); - /* Use protocol to issue command to dongle */ extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd, int len); -/* Update local copy of dongle statistics */ -extern void brcmf_proto_dstats(struct brcmf_pub *drvr); - extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr); extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 313b8bf592d..5a002a21f10 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -28,6 +28,7 @@ #include <linux/semaphore.h> #include <linux/firmware.h> #include <linux/module.h> +#include <linux/bcma/bcma.h> #include <asm/unaligned.h> #include <defs.h> #include <brcmu_wifi.h> @@ -35,6 +36,7 @@ #include <brcm_hw_ids.h> #include <soc.h> #include "sdio_host.h" +#include "sdio_chip.h" #define DCMD_RESP_TIMEOUT 2000 /* In milli second */ @@ -85,11 +87,8 @@ struct rte_console { #endif /* BCMDBG */ #include <chipcommon.h> -#include "dhd.h" #include "dhd_bus.h" -#include "dhd_proto.h" #include "dhd_dbg.h" -#include <bcmchip.h> #define TXQLEN 2048 /* bulk tx queue length */ #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */ @@ -134,33 +133,6 @@ struct rte_console { /* Force no backplane reset */ #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 -/* SBSDIO_FUNC1_CHIPCLKCSR */ - -/* Force ALP request to backplane */ -#define SBSDIO_FORCE_ALP 0x01 -/* Force HT request to backplane */ -#define SBSDIO_FORCE_HT 0x02 -/* Force ILP request to backplane */ -#define SBSDIO_FORCE_ILP 0x04 -/* Make ALP ready (power up xtal) */ -#define SBSDIO_ALP_AVAIL_REQ 0x08 -/* Make HT ready (power up PLL) */ -#define SBSDIO_HT_AVAIL_REQ 0x10 -/* Squelch clock requests from HW */ -#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 -/* Status: ALP is ready */ -#define SBSDIO_ALP_AVAIL 0x40 -/* Status: HT is ready */ -#define SBSDIO_HT_AVAIL 0x80 - -#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) -#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) -#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) -#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) - -#define SBSDIO_CLKAV(regval, alponly) \ - (SBSDIO_ALPAV(regval) && (alponly ? 
1 : SBSDIO_HTAV(regval))) - /* direct(mapped) cis space */ /* MAPPED common CIS address */ @@ -335,49 +307,16 @@ struct rte_console { /* Flags for SDH calls */ #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) -/* sbimstate */ -#define SBIM_IBE 0x20000 /* inbanderror */ -#define SBIM_TO 0x40000 /* timeout */ -#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */ -#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */ - -/* sbtmstatelow */ - -/* reset */ -#define SBTML_RESET 0x0001 -/* reject field */ -#define SBTML_REJ_MASK 0x0006 -/* reject */ -#define SBTML_REJ 0x0002 -/* temporary reject, for error recovery */ -#define SBTML_TMPREJ 0x0004 - -/* Shift to locate the SI control flags in sbtml */ -#define SBTML_SICF_SHIFT 16 - -/* sbtmstatehigh */ -#define SBTMH_SERR 0x0001 /* serror */ -#define SBTMH_INT 0x0002 /* interrupt */ -#define SBTMH_BUSY 0x0004 /* busy */ -#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */ - -/* Shift to locate the SI status flags in sbtmh */ -#define SBTMH_SISF_SHIFT 16 - -/* sbidlow */ -#define SBIDL_INIT 0x80 /* initiator */ - -/* sbidhigh */ -#define SBIDH_RC_MASK 0x000f /* revision code */ -#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */ -#define SBIDH_RCE_SHIFT 8 -#define SBCOREREV(sbidh) \ - ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \ - ((sbidh) & SBIDH_RC_MASK)) -#define SBIDH_CC_MASK 0x8ff0 /* core code */ -#define SBIDH_CC_SHIFT 4 -#define SBIDH_VC_MASK 0xffff0000 /* vendor code */ -#define SBIDH_VC_SHIFT 16 +#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin" +#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt" +MODULE_FIRMWARE(BRCMFMAC_FW_NAME); +MODULE_FIRMWARE(BRCMFMAC_NV_NAME); + +#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */ +#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change + * when idle + */ +#define BRCMF_IDLE_INTERVAL 1 /* * Conversion of 802.1D priority to precedence level @@ -388,17 +327,6 @@ static uint prio2prec(u32 prio) (prio^2) : prio; } -/* - * Core reg address translation. - * Both macro's returns a 32 bits byte address on the backplane bus. 
- */ -#define CORE_CC_REG(base, field) \ - (base + offsetof(struct chipcregs, field)) -#define CORE_BUS_REG(base, field) \ - (base + offsetof(struct sdpcmd_regs, field)) -#define CORE_SB(base, field) \ - (base + SBCONFIGOFF + offsetof(struct sbconfig, field)) - /* core registers */ struct sdpcmd_regs { u32 corecontrol; /* 0x00, rev8 */ @@ -524,25 +452,8 @@ struct sdpcm_shared_le { /* misc chip info needed by some of the routines */ -struct chip_info { - u32 chip; - u32 chiprev; - u32 cccorebase; - u32 ccrev; - u32 cccaps; - u32 buscorebase; /* 32 bits backplane bus address */ - u32 buscorerev; - u32 buscoretype; - u32 ramcorebase; - u32 armcorebase; - u32 pmurev; - u32 ramsize; -}; - /* Private data for SDIO bus interaction */ -struct brcmf_bus { - struct brcmf_pub *drvr; - +struct brcmf_sdio { struct brcmf_sdio_dev *sdiodev; /* sdio device handler */ struct chip_info *ci; /* Chip info struct */ char *vars; /* Variables (from CIS and/or other) */ @@ -574,7 +485,7 @@ struct brcmf_bus { uint txminmax; struct sk_buff *glomd; /* Packet containing glomming descriptor */ - struct sk_buff *glom; /* Packet chain for glommed superframe */ + struct sk_buff_head glom; /* Packet list for glommed superframe */ uint glomerr; /* Glom packet read errors */ u8 *rxbuf; /* Buffer for receiving control packets */ @@ -637,6 +548,13 @@ struct brcmf_bus { uint f2rxdata; /* Number of frame data reads */ uint f2txdata; /* Number of f2 frame writes */ uint f1regdata; /* Number of f1 register accesses */ + uint tickcnt; /* Number of times watchdog has been scheduled */ + unsigned long tx_ctlerrs; /* Err of sending ctrl frames */ + unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */ + unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */ + unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */ + unsigned long rx_readahead_cnt; /* Number of packets where header + * read-ahead was used.
*/ u8 *ctrl_frame_buf; u32 ctrl_frame_len; @@ -657,50 +575,10 @@ struct brcmf_bus { struct semaphore sdsem; - const char *fw_name; const struct firmware *firmware; - const char *nv_name; u32 fw_ptr; -}; -struct sbconfig { - u32 PAD[2]; - u32 sbipsflag; /* initiator port ocp slave flag */ - u32 PAD[3]; - u32 sbtpsflag; /* target port ocp slave flag */ - u32 PAD[11]; - u32 sbtmerrloga; /* (sonics >= 2.3) */ - u32 PAD; - u32 sbtmerrlog; /* (sonics >= 2.3) */ - u32 PAD[3]; - u32 sbadmatch3; /* address match3 */ - u32 PAD; - u32 sbadmatch2; /* address match2 */ - u32 PAD; - u32 sbadmatch1; /* address match1 */ - u32 PAD[7]; - u32 sbimstate; /* initiator agent state */ - u32 sbintvec; /* interrupt mask */ - u32 sbtmstatelow; /* target state */ - u32 sbtmstatehigh; /* target state */ - u32 sbbwa0; /* bandwidth allocation table0 */ - u32 PAD; - u32 sbimconfiglow; /* initiator configuration */ - u32 sbimconfighigh; /* initiator configuration */ - u32 sbadmatch0; /* address match0 */ - u32 PAD; - u32 sbtmconfiglow; /* target configuration */ - u32 sbtmconfighigh; /* target configuration */ - u32 sbbconfig; /* broadcast configuration */ - u32 PAD; - u32 sbbstate; /* broadcast state */ - u32 PAD[3]; - u32 sbactcnfg; /* activate configuration */ - u32 PAD[3]; - u32 sbflagst; /* current sbflags */ - u32 PAD[3]; - u32 sbidlow; /* identification */ - u32 sbidhigh; /* identification */ + bool txoff; /* Transmit flow-controlled */ }; /* clkstate */ @@ -737,7 +615,7 @@ static void pkt_align(struct sk_buff *p, int len, int align) } /* To check if there's window offered */ -static bool data_ok(struct brcmf_bus *bus) +static bool data_ok(struct brcmf_sdio *bus) { return (u8)(bus->tx_max - bus->tx_seq) != 0 && ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0; @@ -748,12 +626,14 @@ static bool data_ok(struct brcmf_bus *bus) * adresses on the 32 bit backplane bus. */ static void -r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) +r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) { + u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); *retryvar = 0; do { *regvar = brcmf_sdcard_reg_read(bus->sdiodev, - bus->ci->buscorebase + reg_offset, sizeof(u32)); + bus->ci->c_inf[idx].base + reg_offset, + sizeof(u32)); } while (brcmf_sdcard_regfail(bus->sdiodev) && (++(*retryvar) <= retry_limit)); if (*retryvar) { @@ -766,12 +646,13 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) } static void -w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar) +w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar) { + u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); *retryvar = 0; do { brcmf_sdcard_reg_write(bus->sdiodev, - bus->ci->buscorebase + reg_offset, + bus->ci->c_inf[idx].base + reg_offset, sizeof(u32), regval); } while (brcmf_sdcard_regfail(bus->sdiodev) && (++(*retryvar) <= retry_limit)); @@ -790,14 +671,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar) /* Packet free applicable unconditionally for sdio and sdspi. * Conditional if bufpool was present for gspi bus. 
*/ -static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt) +static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt) { if (bus->usebufpool) brcmu_pkt_buf_free_skb(pkt); } /* Turn backplane clock on or off */ -static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok) +static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) { int err; u8 clkctl, clkreq, devctl; @@ -812,10 +693,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok) clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; - if ((bus->ci->chip == BCM4329_CHIP_ID) - && (bus->ci->chiprev == 0)) - clkreq |= SBSDIO_FORCE_ALP; - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); if (err) { @@ -823,14 +700,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok) return -EBADE; } - if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID) - && (bus->ci->buscorerev == 9))) { - u32 dummy, retries; - r_sdreg32(bus, &dummy, - offsetof(struct sdpcmd_regs, clockctlstatus), - &retries); - } - /* Check current status */ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); @@ -930,7 +799,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok) } /* Change idle/active SD state */ -static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on) +static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on) { brcmf_dbg(TRACE, "Enter\n"); @@ -943,7 +812,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on) } /* Transition SD and backplane clock readiness */ -static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok) +static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok) { #ifdef BCMDBG uint oldstate = bus->clkstate; @@ -999,7 +868,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok) return 0; } -static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep) +static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep) { uint retries = 0; @@ -1034,11 +903,9 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep) SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); /* Isolate the bus */ - if (bus->ci->chip != BCM4329_CHIP_ID) { - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_DEVICE_CTL, - SBSDIO_DEVCTL_PADS_ISO, NULL); - } + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_DEVICE_CTL, + SBSDIO_DEVCTL_PADS_ISO, NULL); /* Change state */ bus->sleeping = true; @@ -1049,13 +916,6 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep) brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); - /* Force pad isolation off if possible - (in case power never toggled) */ - if ((bus->ci->buscoretype == PCMCIA_CORE_ID) - && (bus->ci->buscorerev >= 10)) - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_DEVICE_CTL, 0, NULL); - /* Make sure the controller has the bus up */ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); @@ -1080,13 +940,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep) return 0; } -static void bus_wake(struct brcmf_bus *bus) +static void bus_wake(struct brcmf_sdio *bus) { if (bus->sleeping) brcmf_sdbrcm_bussleep(bus, false); } -static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus) +static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) { u32 intstatus = 0; u32 hmb_data; @@ -1162,7 +1022,7 @@ static u32 
brcmf_sdbrcm_hostmail(struct brcmf_bus *bus) return intstatus; } -static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx) +static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) { uint retries = 0; u16 lastrbc; @@ -1219,16 +1079,61 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx) /* If we can't reach the device, signal failure */ if (err || brcmf_sdcard_regfail(bus->sdiodev)) - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; +} + +/* copy a buffer into a pkt buffer chain */ +static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len) +{ + uint n, ret = 0; + struct sk_buff *p; + u8 *buf; + + buf = bus->dataptr; + + /* copy the data */ + skb_queue_walk(&bus->glom, p) { + n = min_t(uint, p->len, len); + memcpy(p->data, buf, n); + buf += n; + len -= n; + ret += n; + if (!len) + break; + } + + return ret; +} + +/* return total length of buffer chain */ +static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus) +{ + struct sk_buff *p; + uint total; + + total = 0; + skb_queue_walk(&bus->glom, p) + total += p->len; + return total; } -static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) +static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus) +{ + struct sk_buff *cur, *next; + + skb_queue_walk_safe(&bus->glom, cur, next) { + skb_unlink(cur, &bus->glom); + brcmu_pkt_buf_free_skb(cur); + } +} + +static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) { u16 dlen, totlen; u8 *dptr, num = 0; u16 sublen, check; - struct sk_buff *pfirst, *plast, *pnext, *save_pfirst; + struct sk_buff *pfirst, *pnext; int errcode; u8 chan, seq, doff, sfdoff; @@ -1240,11 +1145,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) /* If packets, issue read(s) and send up packet chain */ /* Return sequence numbers consumed? 
*/ - brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom); + brcmf_dbg(TRACE, "start: glomd %p glom %p\n", + bus->glomd, skb_peek(&bus->glom)); /* If there's a descriptor, generate the packet chain */ if (bus->glomd) { - pfirst = plast = pnext = NULL; + pfirst = pnext = NULL; dlen = (u16) (bus->glomd->len); dptr = bus->glomd->data; if (!dlen || (dlen & 1)) { @@ -1287,12 +1193,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) num, sublen); break; } - if (!pfirst) { - pfirst = plast = pnext; - } else { - plast->next = pnext; - plast = pnext; - } + skb_queue_tail(&bus->glom, pnext); /* Adhere to start alignment requirements */ pkt_align(pnext, sublen, BRCMF_SDALIGN); @@ -1308,12 +1209,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n", bus->nextlen, totlen, rxseq); } - bus->glom = pfirst; pfirst = pnext = NULL; } else { - if (pfirst) - brcmu_pkt_buf_free_skb(pfirst); - bus->glom = NULL; + brcmf_sdbrcm_free_glom(bus); num = 0; } @@ -1325,37 +1223,33 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) /* Ok -- either we just generated a packet chain, or had one from before */ - if (bus->glom) { + if (!skb_queue_empty(&bus->glom)) { if (BRCMF_GLOM_ON()) { brcmf_dbg(GLOM, "try superframe read, packet chain:\n"); - for (pnext = bus->glom; pnext; pnext = pnext->next) { + skb_queue_walk(&bus->glom, pnext) { brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n", pnext, (u8 *) (pnext->data), pnext->len, pnext->len); } } - pfirst = bus->glom; - dlen = (u16) brcmu_pkttotlen(pfirst); + pfirst = skb_peek(&bus->glom); + dlen = (u16) brcmf_sdbrcm_glom_len(bus); /* Do an SDIO read for the superframe. Configurable iovar to * read directly into the chained packet, or allocate a large * packet and and copy into the chain. 
*/ if (usechain) { - errcode = brcmf_sdcard_recv_buf(bus->sdiodev, + errcode = brcmf_sdcard_recv_chain(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, - F2SYNC, (u8 *) pfirst->data, dlen, - pfirst); + SDIO_FUNC_2, F2SYNC, &bus->glom); } else if (bus->dataptr) { errcode = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, - F2SYNC, bus->dataptr, dlen, - NULL); - sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen, - bus->dataptr); + SDIO_FUNC_2, F2SYNC, + bus->dataptr, dlen); + sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen); if (sublen != dlen) { brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n", dlen, sublen); @@ -1373,16 +1267,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) if (errcode < 0) { brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n", dlen, errcode); - bus->drvr->rx_errors++; + bus->sdiodev->bus_if->dstats.rx_errors++; if (bus->glomerr++ < 3) { brcmf_sdbrcm_rxfail(bus, true, true); } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); - brcmu_pkt_buf_free_skb(bus->glom); bus->rxglomfail++; - bus->glom = NULL; + brcmf_sdbrcm_free_glom(bus); } return 0; } @@ -1455,10 +1348,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) /* Remove superframe header, remember offset */ skb_pull(pfirst, doff); sfdoff = doff; + num = 0; /* Validate all the subframe headers */ - for (num = 0, pnext = pfirst; pnext && !errcode; - num++, pnext = pnext->next) { + skb_queue_walk(&bus->glom, pnext) { + /* leave when invalid subframe is found */ + if (errcode) + break; + dptr = (u8 *) (pnext->data); dlen = (u16) (pnext->len); sublen = get_unaligned_le16(dptr); @@ -1491,6 +1388,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) num, doff, sublen, SDPCM_HDRLEN); errcode = -1; } + /* increase the subframe count */ + num++; } if (errcode) { @@ -1503,23 +1402,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); - brcmu_pkt_buf_free_skb(bus->glom); bus->rxglomfail++; - bus->glom = NULL; + brcmf_sdbrcm_free_glom(bus); } bus->nextlen = 0; return 0; } /* Basic SD framing looks ok - process each packet (header) */ - save_pfirst = pfirst; - bus->glom = NULL; - plast = NULL; - - for (num = 0; pfirst; rxseq++, pfirst = pnext) { - pnext = pfirst->next; - pfirst->next = NULL; + skb_queue_walk_safe(&bus->glom, pfirst, pnext) { dptr = (u8 *) (pfirst->data); sublen = get_unaligned_le16(dptr); chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); @@ -1539,6 +1431,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) bus->rx_badseq++; rxseq = seq; } + rxseq++; + #ifdef BCMDBG if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) { printk(KERN_DEBUG "Rx Subframe Data:\n"); @@ -1551,36 +1445,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) skb_pull(pfirst, doff); if (pfirst->len == 0) { + skb_unlink(pfirst, &bus->glom); brcmu_pkt_buf_free_skb(pfirst); - if (plast) - plast->next = pnext; - else - save_pfirst = pnext; - continue; - } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, - pfirst) != 0) { + } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, + &ifidx, pfirst) != 0) { brcmf_dbg(ERROR, "rx protocol error\n"); - bus->drvr->rx_errors++; + bus->sdiodev->bus_if->dstats.rx_errors++; + skb_unlink(pfirst, &bus->glom); brcmu_pkt_buf_free_skb(pfirst); - if (plast) - plast->next = pnext; - else - save_pfirst = pnext; - continue; } - /* this packet will go up, link back into - chain and count it */ - pfirst->next = pnext; - plast = 
pfirst; - num++; - #ifdef BCMDBG if (BRCMF_GLOM_ON()) { brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n", - num, pfirst, pfirst->data, + bus->glom.qlen, pfirst, pfirst->data, pfirst->len, pfirst->next, pfirst->prev); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, @@ -1589,19 +1469,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) } #endif /* BCMDBG */ } - if (num) { + /* sent any remaining packets up */ + if (bus->glom.qlen) { up(&bus->sdsem); - brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num); + brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom); down(&bus->sdsem); } bus->rxglomframes++; - bus->rxglompkts += num; + bus->rxglompkts += bus->glom.qlen; } return num; } -static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition, +static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition, bool *pending) { DECLARE_WAITQUEUE(wait, current); @@ -1623,7 +1504,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition, return timeout; } -static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus) +static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus) { if (waitqueue_active(&bus->dcmd_resp_wait)) wake_up_interruptible(&bus->dcmd_resp_wait); @@ -1631,7 +1512,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus) return 0; } static void -brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff) +brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) { uint rdlen, pad; @@ -1657,7 +1538,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff) if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { pad = bus->blocksize - (rdlen % bus->blocksize); if ((pad <= bus->roundup) && (pad < bus->blocksize) && - ((len + pad) < bus->drvr->maxctl)) + ((len + pad) < bus->sdiodev->bus_if->maxctl)) rdlen += pad; } else if (rdlen % BRCMF_SDALIGN) { rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN); @@ -1668,18 +1549,18 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff) rdlen = roundup(rdlen, ALIGNMENT); /* Drop if the read is too big or it exceeds our maximum */ - if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) { + if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) { brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n", - rdlen, bus->drvr->maxctl); - bus->drvr->rx_errors++; + rdlen, bus->sdiodev->bus_if->maxctl); + bus->sdiodev->bus_if->dstats.rx_errors++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; } - if ((len - doff) > bus->drvr->maxctl) { + if ((len - doff) > bus->sdiodev->bus_if->maxctl) { brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", - len, len - doff, bus->drvr->maxctl); - bus->drvr->rx_errors++; + len, len - doff, bus->sdiodev->bus_if->maxctl); + bus->sdiodev->bus_if->dstats.rx_errors++; bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; @@ -1689,8 +1570,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff) sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, - F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen, - NULL); + F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); bus->f2rxdata++; /* Control frame failures need retransmission */ @@ -1721,7 +1601,7 @@ done: } /* Pad read to blocksize for efficiency */ -static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen) +static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen) { 
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) { *pad = bus->blocksize - (*rdlen % bus->blocksize); @@ -1734,7 +1614,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen) } static void -brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen, +brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen, struct sk_buff **pkt, u8 **rxbuf) { int sdret; /* Return code from calls */ @@ -1746,16 +1626,15 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen, pkt_align(*pkt, rdlen, BRCMF_SDALIGN); *rxbuf = (u8 *) ((*pkt)->data); /* Read the entire frame */ - sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, - *rxbuf, rdlen, *pkt); + sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, *pkt); bus->f2rxdata++; if (sdret < 0) { brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", rdlen, sdret); brcmu_pkt_buf_free_skb(*pkt); - bus->drvr->rx_errors++; + bus->sdiodev->bus_if->dstats.rx_errors++; /* Force retry w/normal header read. * Don't attempt NAK for * gSPI @@ -1767,7 +1646,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen, /* Checks the header */ static int -brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf, +brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf, u8 rxseq, u16 nextlen, u16 *len) { u16 check; @@ -1823,7 +1702,7 @@ fail: /* Return true if there may be more frames to read */ static uint -brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) +brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) { u16 len, check; /* Extracted hardware header fields */ u8 chan, seq, doff; /* Extracted software header fields */ @@ -1846,14 +1725,15 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) *finished = false; for (rxseq = bus->rx_seq, rxleft = maxframes; - !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN; + !bus->rxskip && rxleft && + bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN; rxseq++, rxleft--) { /* Handle glomming separately */ - if (bus->glom || bus->glomd) { + if (bus->glomd || !skb_queue_empty(&bus->glom)) { u8 cnt; brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n", - bus->glomd, bus->glom); + bus->glomd, skb_peek(&bus->glom)); cnt = brcmf_sdbrcm_rxglom(bus, rxseq); brcmf_dbg(GLOM, "rxglom returned %d\n", cnt); rxseq += cnt - 1; @@ -1905,7 +1785,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) bus->nextlen = 0; } - bus->drvr->rx_readahead_cnt++; + bus->rx_readahead_cnt++; /* Handle Flow Control */ fcbits = SDPCM_FCMASK_VALUE( @@ -1976,7 +1856,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) /* Read frame header (hardware and software) */ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, bus->rxhdr, - BRCMF_FIRSTREAD, NULL); + BRCMF_FIRSTREAD); bus->f2rxhdrs++; if (sdret < 0) { @@ -2103,7 +1983,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) /* Too long -- skip this frame */ brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", len, rdlen); - bus->drvr->rx_errors++; + bus->sdiodev->bus_if->dstats.rx_errors++; bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); continue; @@ -2115,7 +1995,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) /* Give up on data, request rtx of events */ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan 
%d\n", rdlen, chan); - bus->drvr->rx_dropped++; + bus->sdiodev->bus_if->dstats.rx_dropped++; brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan)); continue; } @@ -2125,9 +2005,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) pkt_align(pkt, rdlen, BRCMF_SDALIGN); /* Read the remaining frame data */ - sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)), - rdlen, pkt); + sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, pkt); bus->f2rxdata++; if (sdret < 0) { @@ -2136,7 +2015,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) : ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret); brcmu_pkt_buf_free_skb(pkt); - bus->drvr->rx_errors++; + bus->sdiodev->bus_if->dstats.rx_errors++; brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan)); continue; } @@ -2185,16 +2064,17 @@ deliver: if (pkt->len == 0) { brcmu_pkt_buf_free_skb(pkt); continue; - } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) { + } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx, + pkt) != 0) { brcmf_dbg(ERROR, "rx protocol error\n"); brcmu_pkt_buf_free_skb(pkt); - bus->drvr->rx_errors++; + bus->sdiodev->bus_if->dstats.rx_errors++; continue; } /* Unlock during rx call */ up(&bus->sdsem); - brcmf_rx_frame(bus->drvr, ifidx, pkt, 1); + brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt); down(&bus->sdsem); } rxcount = maxframes - rxleft; @@ -2214,16 +2094,8 @@ deliver: return rxcount; } -static int -brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags, - u8 *buf, uint nbytes, struct sk_buff *pkt) -{ - return brcmf_sdcard_send_buf - (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt); -} - static void -brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar) +brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar) { up(&bus->sdsem); wait_event_interruptible_timeout(bus->ctrl_wait, @@ -2233,7 +2105,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar) } static void -brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus) +brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus) { if (waitqueue_active(&bus->ctrl_wait)) wake_up_interruptible(&bus->ctrl_wait); @@ -2242,7 +2114,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus) /* Writes a HW/SW header into the packet and sends it. 
*/ /* Assumes: (a) header space already there, (b) caller holds lock */ -static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, +static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, uint chan, bool free_pkt) { int ret; @@ -2262,7 +2134,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, if (skb_headroom(pkt) < pad) { brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n", skb_headroom(pkt), pad); - bus->drvr->tx_realloc++; + bus->sdiodev->bus_if->tx_realloc++; new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN); if (!new) { brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n", @@ -2331,9 +2203,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, if (len & (ALIGNMENT - 1)) len = roundup(len, ALIGNMENT); - ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, frame, - len, pkt); + ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, pkt); bus->f2txdata++; if (ret < 0) { @@ -2371,7 +2242,7 @@ done: /* restore pkt buffer pointer before calling tx complete routine */ skb_pull(pkt, SDPCM_HDRLEN + pad); up(&bus->sdsem); - brcmf_txcomplete(bus->drvr, pkt, ret != 0); + brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0); down(&bus->sdsem); if (free_pkt) @@ -2380,7 +2251,7 @@ done: return ret; } -static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes) +static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) { struct sk_buff *pkt; u32 intstatus = 0; @@ -2390,8 +2261,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes) uint datalen; u8 tx_prec_map; - struct brcmf_pub *drvr = bus->drvr; - brcmf_dbg(TRACE, "Enter\n"); tx_prec_map = ~bus->flowcontrol; @@ -2409,9 +2278,9 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes) ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true); if (ret) - bus->drvr->tx_errors++; + bus->sdiodev->bus_if->dstats.tx_errors++; else - bus->drvr->dstats.tx_bytes += datalen; + bus->sdiodev->bus_if->dstats.tx_bytes += datalen; /* In poll mode, need to check for other events */ if (!bus->intr && cnt) { @@ -2428,14 +2297,98 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes) } /* Deflow-control stack if needed */ - if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) && - drvr->txoff && (pktq_len(&bus->txq) < TXLOW)) - brcmf_txflowcontrol(drvr, 0, OFF); + if (bus->sdiodev->bus_if->drvr_up && + (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && + bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { + bus->txoff = OFF; + brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF); + } return cnt; } -static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) +static void brcmf_sdbrcm_bus_stop(struct device *dev) +{ + u32 local_hostintmask; + u8 saveclk; + uint retries; + int err; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; + struct brcmf_sdio *bus = sdiodev->bus; + + brcmf_dbg(TRACE, "Enter\n"); + + if (bus->watchdog_tsk) { + send_sig(SIGTERM, bus->watchdog_tsk, 1); + kthread_stop(bus->watchdog_tsk); + bus->watchdog_tsk = NULL; + } + + if (bus->dpc_tsk && bus->dpc_tsk != current) { + send_sig(SIGTERM, bus->dpc_tsk, 1); + kthread_stop(bus->dpc_tsk); + bus->dpc_tsk = NULL; + } + + down(&bus->sdsem); + + bus_wake(bus); + + /* Enable clock for device interrupts */ + brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); + + /* Disable and clear interrupts at the chip level also */ + w_sdreg32(bus, 
0, offsetof(struct sdpcmd_regs, hostintmask), &retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Change our idea of bus state */ + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) + brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err); + + /* Turn off the bus (F2), free any pending packets */ + brcmf_dbg(INTR, "disable SDIO interrupts\n"); + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, + SDIO_FUNC_ENABLE_1, NULL); + + /* Clear any pending interrupts now that F2 is disabled */ + w_sdreg32(bus, local_hostintmask, + offsetof(struct sdpcmd_regs, intstatus), &retries); + + /* Turn off the backplane clock (only) */ + brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false); + + /* Clear the data packet queues */ + brcmu_pktq_flush(&bus->txq, true, NULL, NULL); + + /* Clear any held glomming stuff */ + if (bus->glomd) + brcmu_pkt_buf_free_skb(bus->glomd); + brcmf_sdbrcm_free_glom(bus); + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + brcmf_sdbrcm_dcmd_resp_wake(bus); + + /* Reset some F2 state stuff */ + bus->rxskip = false; + bus->tx_seq = bus->rx_seq = 0; + + up(&bus->sdsem); +} + +static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) { u32 intstatus, newstatus = 0; uint retries = 0; @@ -2463,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) SBSDIO_DEVICE_CTL, &err); if (err) { brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err); - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; } #endif /* BCMDBG */ @@ -2473,7 +2426,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) if (err) { brcmf_dbg(ERROR, "error reading CSR: %d\n", err); - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; } brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", @@ -2486,7 +2439,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) if (err) { brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err); - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; } devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, @@ -2494,7 +2447,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) if (err) { brcmf_dbg(ERROR, "error writing DEVCTL: %d\n", err); - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; } bus->clkstate = CLK_AVAIL; } else { @@ -2596,9 +2549,9 @@ clkwait: (bus->clkstate == CLK_AVAIL)) { int ret, i; - ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, + ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf, - (u32) bus->ctrl_frame_len, NULL); + (u32) bus->ctrl_frame_len); if (ret < 0) { /* On failure, abort the command and @@ -2650,11 +2603,11 @@ clkwait: else await next interrupt */ /* On failed register access, all bets are off: no resched or interrupts */ - if ((bus->drvr->busstate == BRCMF_BUS_DOWN) || + if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || brcmf_sdcard_regfail(bus->sdiodev)) { brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n", brcmf_sdcard_regfail(bus->sdiodev)); - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = 
BRCMF_BUS_DOWN; bus->intstatus = 0; } else if (bus->clkstate == CLK_PENDING) { brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n"); @@ -2681,7 +2634,7 @@ clkwait: static int brcmf_sdbrcm_dpc_thread(void *data) { - struct brcmf_bus *bus = (struct brcmf_bus *) data; + struct brcmf_sdio *bus = (struct brcmf_sdio *) data; allow_signal(SIGTERM); /* Run until signal received */ @@ -2691,12 +2644,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data) if (!wait_for_completion_interruptible(&bus->dpc_wait)) { /* Call bus dpc unless it indicated down (then clean stop) */ - if (bus->drvr->busstate != BRCMF_BUS_DOWN) { + if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) { if (brcmf_sdbrcm_dpc(bus)) complete(&bus->dpc_wait); } else { /* after stopping the bus, exit thread */ - brcmf_sdbrcm_bus_stop(bus); + brcmf_sdbrcm_bus_stop(bus->sdiodev->dev); bus->dpc_tsk = NULL; break; } @@ -2706,10 +2659,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data) return 0; } -int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt) +static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) { int ret = -EBADE; uint datalen, prec; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; + struct brcmf_sdio *bus = sdiodev->bus; brcmf_dbg(TRACE, "Enter\n"); @@ -2728,9 +2684,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt) /* Priority based enq */ spin_lock_bh(&bus->txqlock); - if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) { + if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) == + false) { skb_pull(pkt, SDPCM_HDRLEN); - brcmf_txcomplete(bus->drvr, pkt, false); + brcmf_txcomplete(bus->sdiodev->dev, pkt, false); brcmu_pkt_buf_free_skb(pkt); brcmf_dbg(ERROR, "out of bus->txq !!!\n"); ret = -ENOSR; @@ -2739,8 +2696,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt) } spin_unlock_bh(&bus->txqlock); - if (pktq_len(&bus->txq) >= TXHI) - brcmf_txflowcontrol(bus->drvr, 0, ON); + if (pktq_len(&bus->txq) >= TXHI) { + bus->txoff = ON; + brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON); + } #ifdef BCMDBG if (pktq_plen(&bus->txq, prec) > qcount[prec]) @@ -2757,7 +2716,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt) } static int -brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data, +brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data, uint size) { int bcmerror = 0; @@ -2818,7 +2777,7 @@ xfer_done: #ifdef BCMDBG #define CONSOLE_LINE_MAX 192 -static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus) +static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus) { struct brcmf_console *c = &bus->console; u8 line[CONSOLE_LINE_MAX], ch; @@ -2895,14 +2854,14 @@ break2: } #endif /* BCMDBG */ -static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len) +static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) { int i; int ret; bus->ctrl_frame_stat = false; - ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, frame, len, NULL); + ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, frame, len); if (ret < 0) { /* On failure, abort the command and terminate the frame */ @@ -2937,8 +2896,8 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len) return ret; } -int -brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) +static int +brcmf_sdbrcm_bus_txctl(struct 
device *dev, unsigned char *msg, uint msglen) { u8 *frame; u16 len; @@ -2946,6 +2905,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) uint retries = 0; u8 doff = 0; int ret = -1; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; + struct brcmf_sdio *bus = sdiodev->bus; brcmf_dbg(TRACE, "Enter\n"); @@ -3045,19 +3007,22 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) up(&bus->sdsem); if (ret) - bus->drvr->tx_ctlerrs++; + bus->tx_ctlerrs++; else - bus->drvr->tx_ctlpkts++; + bus->tx_ctlpkts++; return ret ? -EIO : 0; } -int -brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) +static int +brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) { int timeleft; uint rxlen = 0; bool pending; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; + struct brcmf_sdio *bus = sdiodev->bus; brcmf_dbg(TRACE, "Enter\n"); @@ -3083,21 +3048,21 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) } if (rxlen) - bus->drvr->rx_ctlpkts++; + bus->rx_ctlpkts++; else - bus->drvr->rx_ctlerrs++; + bus->rx_ctlerrs++; return rxlen ? (int)rxlen : -ETIMEDOUT; } -static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len) +static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len) { int bcmerror = 0; brcmf_dbg(TRACE, "Enter\n"); /* Basic sanity checks */ - if (bus->drvr->up) { + if (bus->sdiodev->bus_if->drvr_up) { bcmerror = -EISCONN; goto err; } @@ -3123,7 +3088,7 @@ err: return bcmerror; } -static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus) +static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) { int bcmerror = 0; u32 varsize; @@ -3210,135 +3175,11 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus) return bcmerror; } -static void -brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase) -{ - u32 regdata; - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatelow), 4); - if (regdata & SBTML_RESET) - return; - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatelow), 4); - if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) { - /* - * set target reject and spin until busy is clear - * (preserve core-specific bits) - */ - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatelow), 4); - brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), - 4, regdata | SBTML_REJ); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatelow), 4); - udelay(1); - SPINWAIT((brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatehigh), 4) & - SBTMH_BUSY), 100000); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatehigh), 4); - if (regdata & SBTMH_BUSY) - brcmf_dbg(ERROR, "ARM core still busy\n"); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbidlow), 4); - if (regdata & SBIDL_INIT) { - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbimstate), 4) | - SBIM_RJ; - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(corebase, sbimstate), 4, - regdata); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbimstate), 4); - udelay(1); - SPINWAIT((brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbimstate), 4) & - SBIM_BY), 100000); - } - - /* set reset and reject while enabling the clocks */ - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(corebase, sbtmstatelow), 4, - 
(((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | - SBTML_REJ | SBTML_RESET)); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatelow), 4); - udelay(10); - - /* clear the initiator reject bit */ - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbidlow), 4); - if (regdata & SBIDL_INIT) { - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbimstate), 4) & - ~SBIM_RJ; - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(corebase, sbimstate), 4, - regdata); - } - } - - /* leave reset and reject asserted */ - brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, - (SBTML_REJ | SBTML_RESET)); - udelay(1); -} - -static void -brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase) -{ - u32 regdata; - - /* - * Must do the disable sequence first to work for - * arbitrary current core state. - */ - brcmf_sdbrcm_chip_disablecore(sdiodev, corebase); - - /* - * Now do the initialization sequence. - * set reset while enabling the clock and - * forcing them on throughout the core - */ - brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, - ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | - SBTML_RESET); - udelay(1); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbtmstatehigh), 4); - if (regdata & SBTMH_SERR) - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(corebase, sbtmstatehigh), 4, 0); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(corebase, sbimstate), 4); - if (regdata & (SBIM_IBE | SBIM_TO)) - brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4, - regdata & ~(SBIM_IBE | SBIM_TO)); - - /* clear reset and allow it to propagate throughout the core */ - brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, - (SICF_FGC << SBTML_SICF_SHIFT) | - (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); - udelay(1); - - /* leave clock enabled */ - brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, - (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); - udelay(1); -} - -static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter) +static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter) { uint retries; - u32 regdata; int bcmerror = 0; + struct chip_info *ci = bus->ci; /* To enter download state, disable ARM and reset SOCRAM. * To exit download state, simply reset ARM (default is RAM boot). 
@@ -3346,10 +3187,9 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter) if (enter) { bus->alp_only = true; - brcmf_sdbrcm_chip_disablecore(bus->sdiodev, - bus->ci->armcorebase); + ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3); - brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase); + ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM); /* Clear the top bit of memory */ if (bus->ramsize) { @@ -3358,11 +3198,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter) (u8 *)&zeros, 4); } } else { - regdata = brcmf_sdcard_reg_read(bus->sdiodev, - CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4); - regdata &= (SBTML_RESET | SBTML_REJ_MASK | - (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); - if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) { + if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) { brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n"); bcmerror = -EBADE; goto fail; @@ -3377,18 +3213,18 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter) w_sdreg32(bus, 0xFFFFFFFF, offsetof(struct sdpcmd_regs, intstatus), &retries); - brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase); + ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3); /* Allow HT Clock now that the ARM is running. */ bus->alp_only = false; - bus->drvr->busstate = BRCMF_BUS_LOAD; + bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD; } fail: return bcmerror; } -static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus) +static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus) { if (bus->firmware->size < bus->fw_ptr + len) len = bus->firmware->size - bus->fw_ptr; @@ -3398,10 +3234,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus) return len; } -MODULE_FIRMWARE(BCM4329_FW_NAME); -MODULE_FIRMWARE(BCM4329_NV_NAME); - -static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus) +static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) { int offset = 0; uint len; @@ -3410,8 +3243,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus) brcmf_dbg(INFO, "Enter\n"); - bus->fw_name = BCM4329_FW_NAME; - ret = request_firmware(&bus->firmware, bus->fw_name, + ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME, &bus->sdiodev->func[2]->dev); if (ret) { brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret); @@ -3501,15 +3333,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len) return buf_len; } -static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus) +static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) { uint len; char *memblock = NULL; char *bufp; int ret; - bus->nv_name = BCM4329_NV_NAME; - ret = request_firmware(&bus->firmware, bus->nv_name, + ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME, &bus->sdiodev->func[2]->dev); if (ret) { brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret); @@ -3549,7 +3380,7 @@ err: return ret; } -static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) +static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) { int bcmerror = -1; @@ -3582,7 +3413,7 @@ err: } static bool -brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) +brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) { bool ret; @@ -3596,91 +3427,11 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) return ret; } -void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus) +static int brcmf_sdbrcm_bus_init(struct device *dev) { - u32 local_hostintmask; - u8 saveclk; - uint retries; - int err; - - 
brcmf_dbg(TRACE, "Enter\n"); - - if (bus->watchdog_tsk) { - send_sig(SIGTERM, bus->watchdog_tsk, 1); - kthread_stop(bus->watchdog_tsk); - bus->watchdog_tsk = NULL; - } - - if (bus->dpc_tsk && bus->dpc_tsk != current) { - send_sig(SIGTERM, bus->dpc_tsk, 1); - kthread_stop(bus->dpc_tsk); - bus->dpc_tsk = NULL; - } - - down(&bus->sdsem); - - bus_wake(bus); - - /* Enable clock for device interrupts */ - brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); - - /* Disable and clear interrupts at the chip level also */ - w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries); - local_hostintmask = bus->hostintmask; - bus->hostintmask = 0; - - /* Change our idea of bus state */ - bus->drvr->busstate = BRCMF_BUS_DOWN; - - /* Force clocks on backplane to be sure F2 interrupt propagates */ - saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, &err); - if (!err) { - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); - } - if (err) - brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err); - - /* Turn off the bus (F2), free any pending packets */ - brcmf_dbg(INTR, "disable SDIO interrupts\n"); - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, - SDIO_FUNC_ENABLE_1, NULL); - - /* Clear any pending interrupts now that F2 is disabled */ - w_sdreg32(bus, local_hostintmask, - offsetof(struct sdpcmd_regs, intstatus), &retries); - - /* Turn off the backplane clock (only) */ - brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false); - - /* Clear the data packet queues */ - brcmu_pktq_flush(&bus->txq, true, NULL, NULL); - - /* Clear any held glomming stuff */ - if (bus->glomd) - brcmu_pkt_buf_free_skb(bus->glomd); - - if (bus->glom) - brcmu_pkt_buf_free_skb(bus->glom); - - bus->glom = bus->glomd = NULL; - - /* Clear rx control and wake any waiters */ - bus->rxlen = 0; - brcmf_sdbrcm_dcmd_resp_wake(bus); - - /* Reset some F2 state stuff */ - bus->rxskip = false; - bus->tx_seq = bus->rx_seq = 0; - - up(&bus->sdsem); -} - -int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr) -{ - struct brcmf_bus *bus = drvr->bus; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; + struct brcmf_sdio *bus = sdiodev->bus; unsigned long timeout; uint retries = 0; u8 ready, enable; @@ -3690,16 +3441,16 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr) brcmf_dbg(TRACE, "Enter\n"); /* try to download image and nvram to the dongle */ - if (drvr->busstate == BRCMF_BUS_DOWN) { + if (bus_if->state == BRCMF_BUS_DOWN) { if (!(brcmf_sdbrcm_download_firmware(bus))) return -1; } - if (!bus->drvr) + if (!bus->sdiodev->bus_if->drvr) return 0; /* Start the watchdog timer */ - bus->drvr->tickcnt = 0; + bus->tickcnt = 0; brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); down(&bus->sdsem); @@ -3756,7 +3507,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr) SBSDIO_WATERMARK, 8, &err); /* Set bus state according to enable result */ - drvr->busstate = BRCMF_BUS_DATA; + bus_if->state = BRCMF_BUS_DATA; } else { @@ -3771,7 +3522,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr) SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); /* If we didn't come up, turn off backplane clock */ - if (drvr->busstate != BRCMF_BUS_DATA) + if (bus_if->state != BRCMF_BUS_DATA) brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); exit: @@ -3782,7 +3533,7 @@ exit: void brcmf_sdbrcm_isr(void *arg) { - struct brcmf_bus *bus = (struct brcmf_bus *) arg; + struct brcmf_sdio *bus = (struct brcmf_sdio *) arg; brcmf_dbg(TRACE, 
"Enter\n"); @@ -3791,7 +3542,7 @@ void brcmf_sdbrcm_isr(void *arg) return; } - if (bus->drvr->busstate == BRCMF_BUS_DOWN) { + if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) { brcmf_dbg(ERROR, "bus is down. we have nothing to do\n"); return; } @@ -3814,14 +3565,14 @@ void brcmf_sdbrcm_isr(void *arg) complete(&bus->dpc_wait); } -static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr) +static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) { - struct brcmf_bus *bus; +#ifdef BCMDBG + struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); +#endif /* BCMDBG */ brcmf_dbg(TIMER, "Enter\n"); - bus = drvr->bus; - /* Ignore the timer if simulating bus down */ if (bus->sleeping) return false; @@ -3865,7 +3616,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr) } #ifdef BCMDBG /* Poll for console output periodically */ - if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) { + if (bus_if->state == BRCMF_BUS_DATA && + bus->console_interval != 0) { bus->console.count += BRCMF_WD_POLL_MS; if (bus->console.count >= bus->console_interval) { bus->console.count -= bus->console_interval; @@ -3900,10 +3652,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid) { if (chipid == BCM4329_CHIP_ID) return true; + if (chipid == BCM4330_CHIP_ID) + return true; return false; } -static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus) +static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -3915,13 +3669,13 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus) bus->databuf = NULL; } -static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus) +static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); - if (bus->drvr->maxctl) { + if (bus->sdiodev->bus_if->maxctl) { bus->rxblen = - roundup((bus->drvr->maxctl + SDPCM_HDRLEN), + roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN), ALIGNMENT) + BRCMF_SDALIGN; bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC); if (!(bus->rxbuf)) @@ -3950,276 +3704,14 @@ fail: return false; } -/* SDIO Pad drive strength to select value mappings */ -struct sdiod_drive_str { - u8 strength; /* Pad Drive Strength in mA */ - u8 sel; /* Chip-specific select value */ -}; - -/* SDIO Drive Strength to sel value table for PMU Rev 1 */ -static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = { - { - 4, 0x2}, { - 2, 0x3}, { - 1, 0x0}, { - 0, 0x0} - }; - -/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ -static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = { - { - 12, 0x7}, { - 10, 0x6}, { - 8, 0x5}, { - 6, 0x4}, { - 4, 0x2}, { - 2, 0x1}, { - 0, 0x0} - }; - -/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */ -static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = { - { - 32, 0x7}, { - 26, 0x6}, { - 22, 0x5}, { - 16, 0x4}, { - 12, 0x3}, { - 8, 0x2}, { - 4, 0x1}, { - 0, 0x0} - }; - -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) - -static char *brcmf_chipname(uint chipid, char *buf, uint len) -{ - const char *fmt; - - fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? 
"%d" : "%x"; - snprintf(buf, len, fmt, chipid); - return buf; -} - -static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus, - u32 drivestrength) { - struct sdiod_drive_str *str_tab = NULL; - u32 str_mask = 0; - u32 str_shift = 0; - char chn[8]; - - if (!(bus->ci->cccaps & CC_CAP_PMU)) - return; - - switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) { - case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): - str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1; - str_mask = 0x30000000; - str_shift = 28; - break; - case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): - case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): - str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2; - str_mask = 0x00003800; - str_shift = 11; - break; - case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8): - str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3; - str_mask = 0x00003800; - str_shift = 11; - break; - default: - brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", - brcmf_chipname(bus->ci->chip, chn, 8), - bus->ci->chiprev, bus->ci->pmurev); - break; - } - - if (str_tab != NULL) { - u32 drivestrength_sel = 0; - u32 cc_data_temp; - int i; - - for (i = 0; str_tab[i].strength != 0; i++) { - if (drivestrength >= str_tab[i].strength) { - drivestrength_sel = str_tab[i].sel; - break; - } - } - - brcmf_sdcard_reg_write(bus->sdiodev, - CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), - 4, 1); - cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev, - CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4); - cc_data_temp &= ~str_mask; - drivestrength_sel <<= str_shift; - cc_data_temp |= drivestrength_sel; - brcmf_sdcard_reg_write(bus->sdiodev, - CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), - 4, cc_data_temp); - - brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n", - drivestrength, cc_data_temp); - } -} - -static int -brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u32 regs) -{ - u32 regdata; - - /* - * Get CC core rev - * Chipid is assume to be at offset 0 from regs arg - * For different chiptypes or old sdio hosts w/o chipcommon, - * other ways of recognition should be added here. 
- */ - ci->cccorebase = regs; - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->cccorebase, chipid), 4); - ci->chip = regdata & CID_ID_MASK; - ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; - - brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev); - - /* Address of cores for new chips should be added here */ - switch (ci->chip) { - case BCM4329_CHIP_ID: - ci->buscorebase = BCM4329_CORE_BUS_BASE; - ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE; - ci->armcorebase = BCM4329_CORE_ARM_BASE; - ci->ramsize = BCM4329_RAMSIZE; - break; - default: - brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); - return -ENODEV; - } - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->cccorebase, sbidhigh), 4); - ci->ccrev = SBCOREREV(regdata); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->cccorebase, pmucapabilities), 4); - ci->pmurev = regdata & PCAP_REV_MASK; - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->buscorebase, sbidhigh), 4); - ci->buscorerev = SBCOREREV(regdata); - ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT; - - brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n", - ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype); - - /* get chipcommon capabilites */ - ci->cccaps = brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->cccorebase, capabilities), 4); - - return 0; -} - -static int -brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs) -{ - struct chip_info *ci; - int err; - u8 clkval, clkset; - - brcmf_dbg(TRACE, "Enter\n"); - - /* alloc chip_info_t */ - ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC); - if (NULL == ci) - return -ENOMEM; - - /* bus/core/clk setup for register access */ - /* Try forcing SDIO core to do ALPAvail request only */ - clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); - if (err) { - brcmf_dbg(ERROR, "error writing for HT off\n"); - goto fail; - } - - /* If register supported, wait for ALPAvail and then force ALP */ - /* This may take up to 15 milliseconds */ - clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, NULL); - if ((clkval & ~SBSDIO_AVBITS) == clkset) { - SPINWAIT(((clkval = - brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, - NULL)), - !SBSDIO_ALPAV(clkval)), - PMU_MAX_TRANSITION_DLY); - if (!SBSDIO_ALPAV(clkval)) { - brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n", - clkval); - err = -EBUSY; - goto fail; - } - clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | - SBSDIO_FORCE_ALP; - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, - clkset, &err); - udelay(65); - } else { - brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n", - clkset, clkval); - err = -EACCES; - goto fail; - } - - /* Also, disable the extra SDIO pull-ups */ - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); - - err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs); - if (err) - goto fail; - - /* - * Make sure any on-chip ARM is off (in case strapping is wrong), - * or downloaded code was already running. 
- */ - brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase); - - brcmf_sdcard_reg_write(bus->sdiodev, - CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0); - brcmf_sdcard_reg_write(bus->sdiodev, - CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0); - - /* Disable F2 to clear any intermediate frame state on the dongle */ - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, - SDIO_FUNC_ENABLE_1, NULL); - - /* WAR: cmd52 backplane read so core HW will drop ALPReq */ - clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, - 0, NULL); - - /* Done with backplane-dependent accesses, can drop clock... */ - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); - - bus->ci = ci; - return 0; -fail: - bus->ci = NULL; - kfree(ci); - return err; -} - static bool -brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) +brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) { u8 clkctl = 0; int err = 0; int reg_addr; u32 reg_val; + u8 idx; bus->alp_only = true; @@ -4234,7 +3726,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) #endif /* BCMDBG */ /* - * Force PLL off until brcmf_sdbrcm_chip_attach() + * Force PLL off until brcmf_sdio_chip_attach() * programs PLL control regs */ @@ -4252,8 +3744,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) goto fail; } - if (brcmf_sdbrcm_chip_attach(bus, regsva)) { - brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n"); + if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) { + brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n"); goto fail; } @@ -4262,11 +3754,10 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) goto fail; } - brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH); + brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, + SDIO_DRIVE_STRENGTH); - /* Get info on the ARM and SOCRAM cores... */ - brcmf_sdcard_reg_read(bus->sdiodev, - CORE_SB(bus->ci->armcorebase, sbidhigh), 4); + /* Get info on the SOCRAM cores... 
*/ bus->ramsize = bus->ci->ramsize; if (!(bus->ramsize)) { brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n"); @@ -4274,7 +3765,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) } /* Set core control so an SDIO reset does a backplane reset */ - reg_addr = bus->ci->buscorebase + + idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); + reg_addr = bus->ci->c_inf[idx].base + offsetof(struct sdpcmd_regs, corecontrol); reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32)); brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32), @@ -4298,7 +3790,7 @@ fail: return false; } -static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus) +static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -4306,7 +3798,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus) brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1, NULL); - bus->drvr->busstate = BRCMF_BUS_DOWN; + bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; bus->sleeping = false; bus->rxflow = false; @@ -4333,7 +3825,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus) static int brcmf_sdbrcm_watchdog_thread(void *data) { - struct brcmf_bus *bus = (struct brcmf_bus *)data; + struct brcmf_sdio *bus = (struct brcmf_sdio *)data; allow_signal(SIGTERM); /* Run until signal received */ @@ -4341,9 +3833,9 @@ brcmf_sdbrcm_watchdog_thread(void *data) if (kthread_should_stop()) break; if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { - brcmf_sdbrcm_bus_watchdog(bus->drvr); + brcmf_sdbrcm_bus_watchdog(bus); /* Count the tick for reference */ - bus->drvr->tickcnt++; + bus->tickcnt++; } else break; } @@ -4353,7 +3845,7 @@ brcmf_sdbrcm_watchdog_thread(void *data) static void brcmf_sdbrcm_watchdog(unsigned long data) { - struct brcmf_bus *bus = (struct brcmf_bus *)data; + struct brcmf_sdio *bus = (struct brcmf_sdio *)data; if (bus->watchdog_tsk) { complete(&bus->watchdog_wait); @@ -4364,23 +3856,14 @@ brcmf_sdbrcm_watchdog(unsigned long data) } } -static void -brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus) -{ - brcmf_dbg(TRACE, "Enter\n"); - - kfree(bus->ci); - bus->ci = NULL; -} - -static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus) +static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); if (bus->ci) { brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); - brcmf_sdbrcm_chip_detach(bus); + brcmf_sdio_chip_detach(&bus->ci); if (bus->vars && bus->varsz) kfree(bus->vars); bus->vars = NULL; @@ -4390,7 +3873,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus) } /* Detach and free everything */ -static void brcmf_sdbrcm_release(struct brcmf_bus *bus) +static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -4398,10 +3881,9 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus) /* De-register interrupt handler */ brcmf_sdcard_intr_dereg(bus->sdiodev); - if (bus->drvr) { - brcmf_detach(bus->drvr); + if (bus->sdiodev->bus_if->drvr) { + brcmf_detach(bus->sdiodev->dev); brcmf_sdbrcm_release_dongle(bus); - bus->drvr = NULL; } brcmf_sdbrcm_release_malloc(bus); @@ -4412,21 +3894,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus) brcmf_dbg(TRACE, "Disconnected\n"); } -void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, - u32 regsva, struct brcmf_sdio_dev *sdiodev) +void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) { int ret; - struct brcmf_bus *bus; - 
- /* Init global variables at run-time, not as part of the declaration. - * This is required to support init/de-init of the driver. - * Initialization - * of globals as part of the declaration results in non-deterministic - * behavior since the value of the globals may be different on the - * first time that the driver is initialized vs subsequent - * initializations. - */ - brcmf_c_init(); + struct brcmf_sdio *bus; brcmf_dbg(TRACE, "Enter\n"); @@ -4434,12 +3905,13 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, * regsva == SI_ENUM_BASE*/ /* Allocate private bus interface state */ - bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); + bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC); if (!bus) goto fail; bus->sdiodev = sdiodev; sdiodev->bus = bus; + skb_queue_head_init(&bus->glom); bus->txbound = BRCMF_TXBOUND; bus->rxbound = BRCMF_RXBOUND; bus->txminmax = BRCMF_TXMINMAX; @@ -4484,9 +3956,15 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, bus->dpc_tsk = NULL; } + /* Assign bus interface call back */ + bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop; + bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init; + bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata; + bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl; + bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl; /* Attach to the brcmf/OS/network interface */ - bus->drvr = brcmf_attach(bus, SDPCM_RESERVE); - if (!bus->drvr) { + ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev); + if (ret != 0) { brcmf_dbg(ERROR, "brcmf_attach failed\n"); goto fail; } @@ -4514,16 +3992,17 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, brcmf_dbg(INFO, "completed!!\n"); /* if firmware path present try to download and bring up bus */ - ret = brcmf_bus_start(bus->drvr); + ret = brcmf_bus_start(bus->sdiodev->dev); if (ret != 0) { if (ret == -ENOLINK) { brcmf_dbg(ERROR, "dongle is not responding\n"); goto fail; } } - /* Ok, have the per-port tell the stack we're open for business */ - if (brcmf_net_attach(bus->drvr, 0) != 0) { - brcmf_dbg(ERROR, "Net attach failed!!\n"); + + /* add interface and open for business */ + if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) { + brcmf_dbg(ERROR, "Add primary net device interface failed!!\n"); goto fail; } @@ -4536,7 +4015,7 @@ fail: void brcmf_sdbrcm_disconnect(void *ptr) { - struct brcmf_bus *bus = (struct brcmf_bus *)ptr; + struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr; brcmf_dbg(TRACE, "Enter\n"); @@ -4546,18 +4025,9 @@ void brcmf_sdbrcm_disconnect(void *ptr) brcmf_dbg(TRACE, "Disconnected\n"); } -struct device *brcmf_bus_get_device(struct brcmf_bus *bus) -{ - return &bus->sdiodev->func[2]->dev; -} - void -brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick) +brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick) { - /* don't start the wd until fw is loaded */ - if (bus->drvr->busstate == BRCMF_BUS_DOWN) - return; - /* Totally stop the timer */ if (!wdtick && bus->wd_timer_valid == true) { del_timer_sync(&bus->timer); @@ -4566,6 +4036,10 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick) return; } + /* don't start the wd until fw is loaded */ + if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) + return; + if (wdtick) { if (bus->save_ms != BRCMF_WD_POLL_MS) { if (bus->wd_timer_valid == true) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c new file mode 100644 index 
00000000000..11b2d7c97ba --- /dev/null +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c @@ -0,0 +1,607 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* ***** SDIO interface chip backplane handle functions ***** */ + +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/mmc/card.h> +#include <linux/ssb/ssb_regs.h> +#include <linux/bcma/bcma.h> + +#include <chipcommon.h> +#include <brcm_hw_ids.h> +#include <brcmu_wifi.h> +#include <brcmu_utils.h> +#include <soc.h> +#include "dhd_dbg.h" +#include "sdio_host.h" +#include "sdio_chip.h" + +/* chip core base & ramsize */ +/* bcm4329 */ +/* SDIO device core, ID 0x829 */ +#define BCM4329_CORE_BUS_BASE 0x18011000 +/* internal memory core, ID 0x80e */ +#define BCM4329_CORE_SOCRAM_BASE 0x18003000 +/* ARM Cortex M3 core, ID 0x82a */ +#define BCM4329_CORE_ARM_BASE 0x18002000 +#define BCM4329_RAMSIZE 0x48000 + +#define SBCOREREV(sbidh) \ + ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \ + ((sbidh) & SSB_IDHIGH_RCLO)) + +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0 +#define SOCI_AI 1 + +/* EROM CompIdentB */ +#define CIB_REV_MASK 0xff000000 +#define CIB_REV_SHIFT 24 + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) +/* SDIO Pad drive strength to select value mappings */ +struct sdiod_drive_str { + u8 strength; /* Pad Drive Strength in mA */ + u8 sel; /* Chip-specific select value */ +}; +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */ +static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = { + {32, 0x6}, + {26, 0x7}, + {22, 0x4}, + {16, 0x5}, + {12, 0x2}, + {8, 0x3}, + {4, 0x0}, + {0, 0x1} +}; + +u8 +brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid) +{ + u8 idx; + + for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++) + if (coreid == ci->c_inf[idx].id) + return idx; + + return BRCMF_MAX_CORENUM; +} + +static u32 +brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u32 regdata; + u8 idx; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbidhigh), 4); + return SBCOREREV(regdata); +} + +static u32 +brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u8 idx; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT; +} + +static bool +brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u32 regdata; + u8 idx; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT | + SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK); + return 
(SSB_TMSLOW_CLOCK == regdata); +} + +static bool +brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u32 regdata; + u8 idx; + bool ret; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + regdata = brcmf_sdcard_reg_read(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); + ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK; + + regdata = brcmf_sdcard_reg_read(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, + 4); + ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0); + + return ret; +} + +static void +brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u32 regdata; + u8 idx; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + if (regdata & SSB_TMSLOW_RESET) + return; + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + if ((regdata & SSB_TMSLOW_CLOCK) != 0) { + /* + * set target reject and spin until busy is clear + * (preserve core-specific bits) + */ + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), + 4, regdata | SSB_TMSLOW_REJECT); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + udelay(1); + SPINWAIT((brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) & + SSB_TMSHIGH_BUSY), 100000); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4); + if (regdata & SSB_TMSHIGH_BUSY) + brcmf_dbg(ERROR, "core state still busy\n"); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbidlow), 4); + if (regdata & SSB_IDLOW_INITIATOR) { + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4) | + SSB_IMSTATE_REJECT; + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4, + regdata); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4); + udelay(1); + SPINWAIT((brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4) & + SSB_IMSTATE_BUSY), 100000); + } + + /* set reset and reject while enabling the clocks */ + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, + (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | + SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET)); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + udelay(10); + + /* clear the initiator reject bit */ + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbidlow), 4); + if (regdata & SSB_IDLOW_INITIATOR) { + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4) & + ~SSB_IMSTATE_REJECT; + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4, + regdata); + } + } + + /* leave reset and reject asserted */ + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, + (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET)); + udelay(1); +} + +static void +brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u8 idx; + u32 regdata; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + /* if core is already in reset, just return */ + regdata = brcmf_sdcard_reg_read(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, + 4); + if ((regdata & 
BCMA_RESET_CTL_RESET) != 0) + return; + + brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, + 4, 0); + regdata = brcmf_sdcard_reg_read(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); + udelay(10); + + brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, + 4, BCMA_RESET_CTL_RESET); + udelay(1); +} + +static void +brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u32 regdata; + u8 idx; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + /* + * Must do the disable sequence first to work for + * arbitrary current core state. + */ + brcmf_sdio_sb_coredisable(sdiodev, ci, coreid); + + /* + * Now do the initialization sequence. + * set reset while enabling the clock and + * forcing them on throughout the core + */ + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, + SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + udelay(1); + + /* clear any serror */ + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4); + if (regdata & SSB_TMSHIGH_SERR) + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4); + if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbimstate), 4, + regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO)); + + /* clear reset and allow it to propagate throughout the core */ + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, + SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + udelay(1); + + /* leave clock enabled */ + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), + 4, SSB_TMSLOW_CLOCK); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); + udelay(1); +} + +static void +brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid) +{ + u8 idx; + u32 regdata; + + idx = brcmf_sdio_chip_getinfidx(ci, coreid); + + /* must disable first to work for arbitrary current core state */ + brcmf_sdio_ai_coredisable(sdiodev, ci, coreid); + + /* now do initialization sequence */ + brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, + 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK); + regdata = brcmf_sdcard_reg_read(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); + brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, + 4, 0); + udelay(1); + + brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, + 4, BCMA_IOCTL_CLK); + regdata = brcmf_sdcard_reg_read(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); + udelay(1); +} + +static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u32 regs) +{ + u32 regdata; + + /* + * Get CC core rev + * Chipid is assume to be at offset 0 from regs arg + * For different chiptypes or old sdio hosts w/o chipcommon, + * other ways of recognition should be added here. 
+ */ + ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON; + ci->c_inf[0].base = regs; + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, chipid), 4); + ci->chip = regdata & CID_ID_MASK; + ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; + ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT; + + brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev); + + /* Address of cores for new chips should be added here */ + switch (ci->chip) { + case BCM4329_CHIP_ID: + ci->c_inf[1].id = BCMA_CORE_SDIO_DEV; + ci->c_inf[1].base = BCM4329_CORE_BUS_BASE; + ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM; + ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE; + ci->c_inf[3].id = BCMA_CORE_ARM_CM3; + ci->c_inf[3].base = BCM4329_CORE_ARM_BASE; + ci->ramsize = BCM4329_RAMSIZE; + break; + case BCM4330_CHIP_ID: + ci->c_inf[0].wrapbase = 0x18100000; + ci->c_inf[0].cib = 0x27004211; + ci->c_inf[1].id = BCMA_CORE_SDIO_DEV; + ci->c_inf[1].base = 0x18002000; + ci->c_inf[1].wrapbase = 0x18102000; + ci->c_inf[1].cib = 0x07004211; + ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM; + ci->c_inf[2].base = 0x18004000; + ci->c_inf[2].wrapbase = 0x18104000; + ci->c_inf[2].cib = 0x0d080401; + ci->c_inf[3].id = BCMA_CORE_ARM_CM3; + ci->c_inf[3].base = 0x18003000; + ci->c_inf[3].wrapbase = 0x18103000; + ci->c_inf[3].cib = 0x03004211; + ci->ramsize = 0x48000; + break; + default: + brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); + return -ENODEV; + } + + switch (ci->socitype) { + case SOCI_SB: + ci->iscoreup = brcmf_sdio_sb_iscoreup; + ci->corerev = brcmf_sdio_sb_corerev; + ci->coredisable = brcmf_sdio_sb_coredisable; + ci->resetcore = brcmf_sdio_sb_resetcore; + break; + case SOCI_AI: + ci->iscoreup = brcmf_sdio_ai_iscoreup; + ci->corerev = brcmf_sdio_ai_corerev; + ci->coredisable = brcmf_sdio_ai_coredisable; + ci->resetcore = brcmf_sdio_ai_resetcore; + break; + default: + brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype); + return -ENODEV; + } + + return 0; +} + +static int +brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev) +{ + int err = 0; + u8 clkval, clkset; + + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (err) { + brcmf_dbg(ERROR, "error writing for HT off\n"); + return err; + } + + /* If register supported, wait for ALPAvail and then force ALP */ + /* This may take up to 15 milliseconds */ + clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL); + + if ((clkval & ~SBSDIO_AVBITS) != clkset) { + brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n", + clkset, clkval); + return -EACCES; + } + + SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if (!SBSDIO_ALPAV(clkval)) { + brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n", + clkval); + return -EBUSY; + } + + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + udelay(65); + + /* Also, disable the extra SDIO pull-ups */ + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + + return 0; +} + +static void +brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci) +{ + /* get chipcommon rev */ + ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id); + 
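/*
 * Standalone sketch of the dispatch pattern set up above: chip recognition
 * fills a small core table and picks SB- or AI-flavoured method pointers
 * once, so later code can call ci->iscoreup(..., coreid) without caring
 * which interconnect the chip uses.  Types and the iscoreup body are
 * simplified for the example; the core IDs/bases come from this patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CORES 6

struct core { uint16_t id; uint32_t base; };

struct chip {
	struct core c_inf[MAX_CORES];
	bool (*iscoreup)(struct chip *ci, uint16_t coreid);
};

static uint8_t core_index(struct chip *ci, uint16_t coreid)
{
	uint8_t i;

	for (i = 0; i < MAX_CORES; i++)
		if (ci->c_inf[i].id == coreid)
			return i;
	return MAX_CORES;			/* not found */
}

static bool sb_iscoreup(struct chip *ci, uint16_t coreid)
{
	/* stand-in for the real sbtmstatelow register test */
	return core_index(ci, coreid) < MAX_CORES;
}

int main(void)
{
	struct chip ci = {
		.c_inf = { { 0x800, 0x18000000u },	/* chipcommon */
			   { 0x829, 0x18011000u } },	/* SDIO device core */
	};

	ci.iscoreup = sb_iscoreup;	/* chosen once from the SoC interconnect type */
	printf("SDIO core up: %d\n", ci.iscoreup(&ci, 0x829));
	return 0;
}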
+ /* get chipcommon capabilites */ + ci->c_inf[0].caps = + brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, capabilities), 4); + + /* get pmu caps & rev */ + if (ci->c_inf[0].caps & CC_CAP_PMU) { + ci->pmucaps = brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4); + ci->pmurev = ci->pmucaps & PCAP_REV_MASK; + } + + ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id); + + brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n", + ci->c_inf[0].rev, ci->pmurev, + ci->c_inf[1].rev, ci->c_inf[1].id); + + /* + * Make sure any on-chip ARM is off (in case strapping is wrong), + * or downloaded code was already running. + */ + ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3); +} + +int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, + struct chip_info **ci_ptr, u32 regs) +{ + int ret; + struct chip_info *ci; + + brcmf_dbg(TRACE, "Enter\n"); + + /* alloc chip_info_t */ + ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC); + if (!ci) + return -ENOMEM; + + ret = brcmf_sdio_chip_buscoreprep(sdiodev); + if (ret != 0) + goto err; + + ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs); + if (ret != 0) + goto err; + + brcmf_sdio_chip_buscoresetup(sdiodev, ci); + + brcmf_sdcard_reg_write(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0); + brcmf_sdcard_reg_write(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0); + + *ci_ptr = ci; + return 0; + +err: + kfree(ci); + return ret; +} + +void +brcmf_sdio_chip_detach(struct chip_info **ci_ptr) +{ + brcmf_dbg(TRACE, "Enter\n"); + + kfree(*ci_ptr); + *ci_ptr = NULL; +} + +static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len) +{ + const char *fmt; + + fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; + snprintf(buf, len, fmt, chipid); + return buf; +} + +void +brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u32 drivestrength) +{ + struct sdiod_drive_str *str_tab = NULL; + u32 str_mask = 0; + u32 str_shift = 0; + char chn[8]; + + if (!(ci->c_inf[0].caps & CC_CAP_PMU)) + return; + + switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) { + case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12): + str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + default: + brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + brcmf_sdio_chip_name(ci->chip, chn, 8), + ci->chiprev, ci->pmurev); + break; + } + + if (str_tab != NULL) { + u32 drivestrength_sel = 0; + u32 cc_data_temp; + int i; + + for (i = 0; str_tab[i].strength != 0; i++) { + if (drivestrength >= str_tab[i].strength) { + drivestrength_sel = str_tab[i].sel; + break; + } + } + + brcmf_sdcard_reg_write(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), + 4, 1); + cc_data_temp = brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4); + cc_data_temp &= ~str_mask; + drivestrength_sel <<= str_shift; + cc_data_temp |= drivestrength_sel; + brcmf_sdcard_reg_write(sdiodev, + CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), + 4, cc_data_temp); + + brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n", + drivestrength, cc_data_temp); + } +} diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h new file mode 100644 index 00000000000..ce974d76bd9 --- /dev/null +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2011 Broadcom 
Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _BRCMFMAC_SDIO_CHIP_H_ +#define _BRCMFMAC_SDIO_CHIP_H_ + +/* + * Core reg address translation. + * Both macro's returns a 32 bits byte address on the backplane bus. + */ +#define CORE_CC_REG(base, field) \ + (base + offsetof(struct chipcregs, field)) +#define CORE_BUS_REG(base, field) \ + (base + offsetof(struct sdpcmd_regs, field)) +#define CORE_SB(base, field) \ + (base + SBCONFIGOFF + offsetof(struct sbconfig, field)) + +/* SDIO function 1 register CHIPCLKCSR */ +/* Force ALP request to backplane */ +#define SBSDIO_FORCE_ALP 0x01 +/* Force HT request to backplane */ +#define SBSDIO_FORCE_HT 0x02 +/* Force ILP request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 +/* Make ALP ready (power up xtal) */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 +/* Make HT ready (power up PLL) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 +/* Squelch clock requests from HW */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 +/* Status: ALP is ready */ +#define SBSDIO_ALP_AVAIL 0x40 +/* Status: HT is ready */ +#define SBSDIO_HT_AVAIL 0x80 +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) +#define SBSDIO_CLKAV(regval, alponly) \ + (SBSDIO_ALPAV(regval) && (alponly ? 
1 : SBSDIO_HTAV(regval))) + +#define BRCMF_MAX_CORENUM 6 + +struct chip_core_info { + u16 id; + u16 rev; + u32 base; + u32 wrapbase; + u32 caps; + u32 cib; +}; + +struct chip_info { + u32 chip; + u32 chiprev; + u32 socitype; + /* core info */ + /* always put chipcommon core at 0, bus core at 1 */ + struct chip_core_info c_inf[BRCMF_MAX_CORENUM]; + u32 pmurev; + u32 pmucaps; + u32 ramsize; + + bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, + u16 coreid); + u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, + u16 coreid); + void (*coredisable)(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid); + void (*resetcore)(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u16 coreid); +}; + +struct sbconfig { + u32 PAD[2]; + u32 sbipsflag; /* initiator port ocp slave flag */ + u32 PAD[3]; + u32 sbtpsflag; /* target port ocp slave flag */ + u32 PAD[11]; + u32 sbtmerrloga; /* (sonics >= 2.3) */ + u32 PAD; + u32 sbtmerrlog; /* (sonics >= 2.3) */ + u32 PAD[3]; + u32 sbadmatch3; /* address match3 */ + u32 PAD; + u32 sbadmatch2; /* address match2 */ + u32 PAD; + u32 sbadmatch1; /* address match1 */ + u32 PAD[7]; + u32 sbimstate; /* initiator agent state */ + u32 sbintvec; /* interrupt mask */ + u32 sbtmstatelow; /* target state */ + u32 sbtmstatehigh; /* target state */ + u32 sbbwa0; /* bandwidth allocation table0 */ + u32 PAD; + u32 sbimconfiglow; /* initiator configuration */ + u32 sbimconfighigh; /* initiator configuration */ + u32 sbadmatch0; /* address match0 */ + u32 PAD; + u32 sbtmconfiglow; /* target configuration */ + u32 sbtmconfighigh; /* target configuration */ + u32 sbbconfig; /* broadcast configuration */ + u32 PAD; + u32 sbbstate; /* broadcast state */ + u32 PAD[3]; + u32 sbactcnfg; /* activate configuration */ + u32 PAD[3]; + u32 sbflagst; /* current sbflags */ + u32 PAD[3]; + u32 sbidlow; /* identification */ + u32 sbidhigh; /* identification */ +}; + +extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, + struct chip_info **ci_ptr, u32 regs); +extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr); +extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, + u32 drivestrength); +extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid); + + +#endif /* _BRCMFMAC_SDIO_CHIP_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index 726fa898111..0281d207d99 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h @@ -116,12 +116,20 @@ #define SUCCESS 0 #define ERROR 1 +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#define BRCMF_SDALIGN (1 << 6) + +/* watchdog polling interval in ms */ +#define BRCMF_WD_POLL_MS 10 + struct brcmf_sdreg { int func; int offset; int value; }; +struct brcmf_sdio; + struct brcmf_sdio_dev { struct sdio_func *func[SDIO_MAX_FUNCS]; u8 num_funcs; /* Supported funcs on client */ @@ -132,9 +140,10 @@ struct brcmf_sdio_dev { atomic_t suspend; /* suspend flag */ wait_queue_head_t request_byte_wait; wait_queue_head_t request_word_wait; - wait_queue_head_t request_packet_wait; + wait_queue_head_t request_chain_wait; wait_queue_head_t request_buffer_wait; - + struct device *dev; + struct brcmf_bus *bus_if; }; /* Register/deregister device interrupt handler. 
*/ @@ -182,11 +191,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev); * NOTE: Async operation is not currently supported. */ extern int +brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, struct sk_buff *pkt); +extern int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt); + uint flags, u8 *buf, uint nbytes); + +extern int +brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, struct sk_buff *pkt); extern int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt); + uint flags, u8 *buf, uint nbytes); +extern int +brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, struct sk_buff_head *pktq); /* Flags bits */ @@ -237,16 +256,20 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, /* read or write any buffer using cmd53 */ extern int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, - uint fix_inc, uint rw, uint fnc_num, - u32 addr, uint regwidth, - u32 buflen, u8 *buffer, struct sk_buff *pkt); + uint fix_inc, uint rw, uint fnc_num, u32 addr, + struct sk_buff *pkt); +extern int +brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, + uint write, uint func, uint addr, + struct sk_buff_head *pktq); /* Watchdog timer interface for pm ops */ extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable); -extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, - u32 regsva, struct brcmf_sdio_dev *sdiodev); +extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev); extern void brcmf_sdbrcm_disconnect(void *ptr); extern void brcmf_sdbrcm_isr(void *arg); + +extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick); #endif /* _BRCM_SDH_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 5eddabe5228..f23b0c3e4ea 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, static s32 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, - enum nl80211_tx_power_setting type, s32 dbm) + enum nl80211_tx_power_setting type, s32 mbm) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); @@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, u16 txpwrmw; s32 err = 0; s32 disable = 0; + s32 dbm = MBM_TO_DBM(mbm); WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) @@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, case NL80211_TX_POWER_AUTOMATIC: break; case NL80211_TX_POWER_LIMITED: - if (dbm < 0) { - WL_ERR("TX_POWER_LIMITED - dbm is negative\n"); - err = -EINVAL; - goto done; - } - break; case NL80211_TX_POWER_FIXED: if (dbm < 0) { WL_ERR("TX_POWER_FIXED - dbm is negative\n"); @@ -1997,7 +1992,7 @@ done: } static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, - struct brcmf_bss_info *bi) + struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; @@ -2049,18 +2044,27 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, notify_timestamp, notify_capability, notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); - if (!bss) { - 
WL_ERR("cfg80211_inform_bss_frame error\n"); - return -EINVAL; - } + if (!bss) + return -ENOMEM; + + cfg80211_put_bss(bss); return err; } +static struct brcmf_bss_info_le * +next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss) +{ + if (bss == NULL) + return list->bss_info_le; + return (struct brcmf_bss_info_le *)((unsigned long)bss + + le32_to_cpu(bss->length)); +} + static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_scan_results *bss_list; - struct brcmf_bss_info *bi = NULL; /* must be initialized */ + struct brcmf_bss_info_le *bi = NULL; /* must be initialized */ s32 err = 0; int i; @@ -2072,7 +2076,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) } WL_SCAN("scanned AP count (%d)\n", bss_list->count); for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) { - bi = next_bss(bss_list, bi); + bi = next_bss_le(bss_list, bi); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) break; @@ -2085,8 +2089,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; - struct brcmf_bss_info *bi = NULL; + struct brcmf_bss_info_le *bi = NULL; struct ieee80211_supported_band *band; + struct cfg80211_bss *bss; u8 *buf = NULL; s32 err = 0; u16 channel; @@ -2114,7 +2119,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, goto CleanUp; } - bi = (struct brcmf_bss_info *)(buf + 4); + bi = (struct brcmf_bss_info_le *)(buf + 4); channel = bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); @@ -2140,10 +2145,17 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, WL_CONN("signal: %d\n", notify_signal); WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp); - cfg80211_inform_bss(wiphy, notify_channel, bssid, + bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, notify_timestamp, notify_capability, notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); + if (!bss) { + err = -ENOMEM; + goto CleanUp; + } + + cfg80211_put_bss(bss); + CleanUp: kfree(buf); @@ -2188,7 +2200,7 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) { - struct brcmf_bss_info *bi; + struct brcmf_bss_info_le *bi; struct brcmf_ssid *ssid; struct brcmf_tlv *tim; u16 beacon_interval; @@ -2211,7 +2223,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) goto update_bss_info_out; } - bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4); + bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) goto update_bss_info_out; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h index 62dc46144ed..a613b49cb13 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h @@ -352,15 +352,6 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg) return &cfg->conn_info; } -static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list, - struct brcmf_bss_info *bss) -{ - return bss = bss ? 
- (struct brcmf_bss_info *)((unsigned long)bss + - le32_to_cpu(bss->length)) : - list->bss_info; -} - extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, struct device *busdev, void *data); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c index 025fa0eb6f4..ab9bb11abfb 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c @@ -16,6 +16,8 @@ * File contents: support functions for PCI/PCIe */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/delay.h> #include <linux/pci.h> @@ -316,51 +318,24 @@ #define BADIDX (SI_MAXCORES + 1) -/* Newer chips can access PCI/PCIE and CC core without requiring to change - * PCI BAR0 WIN - */ -#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \ - (((si)->pub.buscoretype == PCI_CORE_ID) && \ - (si)->pub.buscorerev >= 13)) - -#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \ - PCI_16KB0_CCREGS_OFFSET)) - #define IS_SIM(chippkg) \ ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) -/* - * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts - * before after core switching to avoid invalid register accesss inside ISR. - */ -#define INTR_OFF(si, intr_val) \ - if ((si)->intrsoff_fn && \ - (si)->coreid[(si)->curidx] == (si)->dev_coreid) \ - intr_val = (*(si)->intrsoff_fn)((si)->intr_arg) +#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID) +#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID) -#define INTR_RESTORE(si, intr_val) \ - if ((si)->intrsrestore_fn && \ - (si)->coreid[(si)->curidx] == (si)->dev_coreid) \ - (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val) - -#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID) -#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID) - -#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID)) +#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID)) #ifdef BCMDBG -#define SI_MSG(args) printk args +#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__) #else -#define SI_MSG(args) +#define SI_MSG(fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #endif /* BCMDBG */ #define GOODCOREADDR(x, b) \ (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ IS_ALIGNED((x), SI_CORE_SIZE)) -#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \ - PCI_16KB0_PCIREGS_OFFSET) - struct aidmp { u32 oobselina30; /* 0x000 */ u32 oobselina74; /* 0x004 */ @@ -479,406 +454,13 @@ struct aidmp { u32 componentid3; /* 0xffc */ }; -/* EROM parsing */ - -static u32 -get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match) -{ - u32 ent; - uint inv = 0, nom = 0; - - while (true) { - ent = R_REG(*eromptr); - (*eromptr)++; - - if (mask == 0) - break; - - if ((ent & ER_VALID) == 0) { - inv++; - continue; - } - - if (ent == (ER_END | ER_VALID)) - break; - - if ((ent & mask) == match) - break; - - nom++; - } - - return ent; -} - -static u32 -get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st, - u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh) -{ - u32 asd, sz, szd; - - asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); - if (((asd & ER_TAG1) != ER_ADD) || - (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || - ((asd & AD_ST_MASK) != st)) { - /* This is not what we want, "push" it back */ - (*eromptr)--; - return 0; - } - *addrl = asd & AD_ADDR_MASK; - if (asd & AD_AG32) - *addrh = get_erom_ent(sih, eromptr, 0, 0); - else - *addrh = 0; - *sizeh = 0; - sz = asd & AD_SZ_MASK; - if (sz == AD_SZ_SZD) { - szd = get_erom_ent(sih, eromptr, 0, 0); - *sizel = szd & SD_SZ_MASK; - if (szd & SD_SG32) - *sizeh = get_erom_ent(sih, eromptr, 0, 0); - } else - *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); - - return asd; -} - -static void ai_hwfixup(struct si_info *sii) -{ -} - -/* parse the enumeration rom to identify all cores */ -static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc) -{ - struct si_info *sii = (struct si_info *)sih; - - u32 erombase; - u32 __iomem *eromptr, *eromlim; - void __iomem *regs = cc; - - erombase = R_REG(&cc->eromptr); - - /* Set wrappers address */ - sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE); - - /* Now point the window at the erom */ - pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase); - eromptr = regs; - eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32)); - - while (eromptr < eromlim) { - u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; - u32 mpd, asd, addrl, addrh, sizel, sizeh; - u32 __iomem *base; - uint i, j, idx; - bool br; - - br = false; - - /* Grok a component */ - cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); - if (cia == (ER_END | ER_VALID)) { - /* Found END of erom */ - ai_hwfixup(sii); - return; - } - base = eromptr - 1; - cib = get_erom_ent(sih, &eromptr, 0, 0); - - if ((cib & ER_TAG) != ER_CI) { - /* CIA not followed by CIB */ - goto error; - } - - cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; - mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; - crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; - nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; - nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; - nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; - nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; - - if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) - continue; - if ((nmw + nsw == 0)) { - /* A component which is not a core */ - if (cid == OOB_ROUTER_CORE_ID) { - asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, - &addrl, &addrh, &sizel, &sizeh); - if (asd != 0) - sii->oob_router = addrl; - } - continue; - } - - idx = sii->numcores; -/* sii->eromptr[idx] = base; */ - sii->cia[idx] = cia; - sii->cib[idx] = cib; - 
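/*
 * Standalone model of the descriptor decoding done by the (removed) EROM
 * scan above: each component is described by a CIA/CIB register pair and
 * the interesting fields are plain mask-and-shift extractions.  Only the
 * CIB revision mask/shift are taken from sdio_chip.c in this patch; the
 * CIA mask/shift and the sample values are placeholders for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define CIB_REV_MASK   0xff000000u	/* from sdio_chip.c above */
#define CIB_REV_SHIFT  24
#define CIA_CID_MASK   0x000fff00u	/* placeholder */
#define CIA_CID_SHIFT  8		/* placeholder */

int main(void)
{
	uint32_t cia = 0x4bf82901u;	/* example descriptor words */
	uint32_t cib = 0x27004211u;

	printf("core id 0x%x, rev %u\n",
	       (cia & CIA_CID_MASK) >> CIA_CID_SHIFT,
	       (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
	return 0;
}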
sii->coreid[idx] = cid; - - for (i = 0; i < nmp; i++) { - mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); - if ((mpd & ER_TAG) != ER_MP) { - /* Not enough MP entries for component */ - goto error; - } - } - - /* First Slave Address Descriptor should be port 0: - * the main register space for the core - */ - asd = - get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, - &sizel, &sizeh); - if (asd == 0) { - /* Try again to see if it is a bridge */ - asd = - get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, - &addrh, &sizel, &sizeh); - if (asd != 0) - br = true; - else if ((addrh != 0) || (sizeh != 0) - || (sizel != SI_CORE_SIZE)) { - /* First Slave ASD for core malformed */ - goto error; - } - } - sii->coresba[idx] = addrl; - sii->coresba_size[idx] = sizel; - /* Get any more ASDs in port 0 */ - j = 1; - do { - asd = - get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, - &addrh, &sizel, &sizeh); - if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { - sii->coresba2[idx] = addrl; - sii->coresba2_size[idx] = sizel; - } - j++; - } while (asd != 0); - - /* Go through the ASDs for other slave ports */ - for (i = 1; i < nsp; i++) { - j = 0; - do { - asd = - get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, - &addrl, &addrh, &sizel, &sizeh); - } while (asd != 0); - if (j == 0) { - /* SP has no address descriptors */ - goto error; - } - } - - /* Now get master wrappers */ - for (i = 0; i < nmw; i++) { - asd = - get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, - &addrh, &sizel, &sizeh); - if (asd == 0) { - /* Missing descriptor for MW */ - goto error; - } - if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { - /* Master wrapper %d is not 4KB */ - goto error; - } - if (i == 0) - sii->wrapba[idx] = addrl; - } - - /* And finally slave wrappers */ - for (i = 0; i < nsw; i++) { - uint fwp = (nsp == 1) ? 0 : 1; - asd = - get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, - &addrl, &addrh, &sizel, &sizeh); - if (asd == 0) { - /* Missing descriptor for SW */ - goto error; - } - if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { - /* Slave wrapper is not 4KB */ - goto error; - } - if ((nmw == 0) && (i == 0)) - sii->wrapba[idx] = addrl; - } - - /* Don't record bridges */ - if (br) - continue; - - /* Done with core */ - sii->numcores++; - } - - error: - /* Reached end of erom without finding END */ - sii->numcores = 0; - return; -} - -/* - * This function changes the logical "focus" to the indicated core. - * Return the current core's virtual address. Since each core starts with the - * same set of registers (BIST, clock control, etc), the returned address - * contains the first register of this 'common' register block (not to be - * confused with 'common core'). 
- */ -void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx) -{ - struct si_info *sii = (struct si_info *)sih; - u32 addr = sii->coresba[coreidx]; - u32 wrap = sii->wrapba[coreidx]; - - if (coreidx >= sii->numcores) - return NULL; - - /* point bar0 window */ - pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr); - /* point bar0 2nd 4KB window */ - pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap); - sii->curidx = coreidx; - - return sii->curmap; -} - -/* Return the number of address spaces in current core */ -int ai_numaddrspaces(struct si_pub *sih) -{ - return 2; -} - -/* Return the address of the nth address space in the current core */ -u32 ai_addrspace(struct si_pub *sih, uint asidx) -{ - struct si_info *sii; - uint cidx; - - sii = (struct si_info *)sih; - cidx = sii->curidx; - - if (asidx == 0) - return sii->coresba[cidx]; - else if (asidx == 1) - return sii->coresba2[cidx]; - else { - /* Need to parse the erom again to find addr space */ - return 0; - } -} - -/* Return the size of the nth address space in the current core */ -u32 ai_addrspacesize(struct si_pub *sih, uint asidx) -{ - struct si_info *sii; - uint cidx; - - sii = (struct si_info *)sih; - cidx = sii->curidx; - - if (asidx == 0) - return sii->coresba_size[cidx]; - else if (asidx == 1) - return sii->coresba2_size[cidx]; - else { - /* Need to parse the erom again to find addr */ - return 0; - } -} - -uint ai_flag(struct si_pub *sih) -{ - struct si_info *sii; - struct aidmp *ai; - - sii = (struct si_info *)sih; - ai = sii->curwrap; - - return R_REG(&ai->oobselouta30) & 0x1f; -} - -void ai_setint(struct si_pub *sih, int siflag) -{ -} - -uint ai_corevendor(struct si_pub *sih) -{ - struct si_info *sii; - u32 cia; - - sii = (struct si_info *)sih; - cia = sii->cia[sii->curidx]; - return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; -} - -uint ai_corerev(struct si_pub *sih) -{ - struct si_info *sii; - u32 cib; - - sii = (struct si_info *)sih; - cib = sii->cib[sii->curidx]; - return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; -} - -bool ai_iscoreup(struct si_pub *sih) -{ - struct si_info *sii; - struct aidmp *ai; - - sii = (struct si_info *)sih; - ai = sii->curwrap; - - return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == - SICF_CLOCK_EN) - && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0)); -} - -void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val) -{ - struct si_info *sii; - struct aidmp *ai; - u32 w; - - sii = (struct si_info *)sih; - - ai = sii->curwrap; - - if (mask || val) { - w = ((R_REG(&ai->ioctrl) & ~mask) | val); - W_REG(&ai->ioctrl, w); - } -} - -u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val) -{ - struct si_info *sii; - struct aidmp *ai; - u32 w; - - sii = (struct si_info *)sih; - ai = sii->curwrap; - - if (mask || val) { - w = ((R_REG(&ai->ioctrl) & ~mask) | val); - W_REG(&ai->ioctrl, w); - } - - return R_REG(&ai->ioctrl); -} - /* return true if PCIE capability exists in the pci config space */ static bool ai_ispcie(struct si_info *sii) { u8 cap_ptr; cap_ptr = - pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL, + pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL, NULL); if (!cap_ptr) return false; @@ -894,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii) return true; } -u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val) -{ - struct si_info *sii; - struct aidmp *ai; - u32 w; - - sii = (struct si_info *)sih; - ai = sii->curwrap; - - if (mask || val) { - w = ((R_REG(&ai->iostatus) & ~mask) | val); - W_REG(&ai->iostatus, w); - } - - return 
R_REG(&ai->iostatus); -} - static bool -ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx) +ai_buscore_setup(struct si_info *sii, struct bcma_device *cc) { - bool pci, pcie; - uint i; - uint pciidx, pcieidx, pcirev, pcierev; - struct chipcregs __iomem *cc; + struct bcma_device *pci = NULL; + struct bcma_device *pcie = NULL; + struct bcma_device *core; - cc = ai_setcoreidx(&sii->pub, SI_CC_IDX); + + /* no cores found, bail out */ + if (cc->bus->nr_cores == 0) + return false; /* get chipcommon rev */ - sii->pub.ccrev = (int)ai_corerev(&sii->pub); + sii->pub.ccrev = cc->id.rev; /* get chipcommon chipstatus */ - if (sii->pub.ccrev >= 11) - sii->pub.chipst = R_REG(&cc->chipstatus); + if (ai_get_ccrev(&sii->pub) >= 11) + sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus)); /* get chipcommon capabilites */ - sii->pub.cccaps = R_REG(&cc->capabilities); - /* get chipcommon extended capabilities */ - - if (sii->pub.ccrev >= 35) - sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext); + sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities)); /* get pmu rev and caps */ - if (sii->pub.cccaps & CC_CAP_PMU) { - sii->pub.pmucaps = R_REG(&cc->pmucapabilities); + if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) { + sii->pub.pmucaps = bcma_read32(cc, + CHIPCREGOFFS(pmucapabilities)); sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; } - /* figure out bus/orignal core idx */ - sii->pub.buscoretype = NODEV_CORE_ID; - sii->pub.buscorerev = NOREV; - sii->pub.buscoreidx = BADIDX; - - pci = pcie = false; - pcirev = pcierev = NOREV; - pciidx = pcieidx = BADIDX; - - for (i = 0; i < sii->numcores; i++) { + /* figure out buscore */ + list_for_each_entry(core, &cc->bus->cores, list) { uint cid, crev; - ai_setcoreidx(&sii->pub, i); - cid = ai_coreid(&sii->pub); - crev = ai_corerev(&sii->pub); + cid = core->id.id; + crev = core->id.rev; if (cid == PCI_CORE_ID) { - pciidx = i; - pcirev = crev; - pci = true; + pci = core; } else if (cid == PCIE_CORE_ID) { - pcieidx = i; - pcierev = crev; - pcie = true; + pcie = core; } - - /* find the core idx before entering this func. 
*/ - if ((savewin && (savewin == sii->coresba[i])) || - (cc == sii->regs[i])) - *origidx = i; } if (pci && pcie) { if (ai_ispcie(sii)) - pci = false; + pci = NULL; else - pcie = false; + pcie = NULL; } if (pci) { - sii->pub.buscoretype = PCI_CORE_ID; - sii->pub.buscorerev = pcirev; - sii->pub.buscoreidx = pciidx; + sii->buscore = pci; } else if (pcie) { - sii->pub.buscoretype = PCIE_CORE_ID; - sii->pub.buscorerev = pcierev; - sii->pub.buscoreidx = pcieidx; + sii->buscore = pcie; } /* fixup necessary chip/core configurations */ - if (SI_FAST(sii)) { - if (!sii->pch) { - sii->pch = pcicore_init(&sii->pub, sii->pbus, - (__iomem void *)PCIEREGS(sii)); - if (sii->pch == NULL) - return false; - } + if (!sii->pch) { + sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core); + if (sii->pch == NULL) + return false; } - if (ai_pci_fixcfg(&sii->pub)) { - /* si_doattach: si_pci_fixcfg failed */ + if (ai_pci_fixcfg(&sii->pub)) return false; - } - - /* return to the original core */ - ai_setcoreidx(&sii->pub, *origidx); return true; } @@ -1017,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii) uint w = 0; /* do a pci config read to get subsystem id and subvendor id */ - pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w); + pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w); sii->pub.boardvendor = w & 0xffff; sii->pub.boardtype = (w >> 16) & 0xffff; - sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS); } static struct si_info *ai_doattach(struct si_info *sii, - void __iomem *regs, struct pci_dev *pbus) + struct bcma_bus *pbus) { struct si_pub *sih = &sii->pub; u32 w, savewin; - struct chipcregs __iomem *cc; + struct bcma_device *cc; uint socitype; - uint origidx; - - memset((unsigned char *) sii, 0, sizeof(struct si_info)); savewin = 0; - sih->buscoreidx = BADIDX; - - sii->curmap = regs; - sii->pbus = pbus; - - /* find Chipcommon address */ - pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin); - if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) - savewin = SI_ENUM_BASE; + sii->icbus = pbus; + sii->pcibus = pbus->host_pci; - pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, - SI_ENUM_BASE); - cc = (struct chipcregs __iomem *) regs; + /* switch to Chipcommon core */ + cc = pbus->drv_cc.core; /* bus/core/clk setup for register access */ if (!ai_buscore_prep(sii)) @@ -1062,94 +584,74 @@ static struct si_info *ai_doattach(struct si_info *sii, * hosts w/o chipcommon), some way of recognizing them needs to * be added here. 
*/ - w = R_REG(&cc->chipid); + w = bcma_read32(cc, CHIPCREGOFFS(chipid)); socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; /* Might as wll fill in chip id rev & pkg */ sih->chip = w & CID_ID_MASK; sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; - sih->issim = false; - /* scan for cores */ - if (socitype == SOCI_AI) { - SI_MSG(("Found chip type AI (0x%08x)\n", w)); - /* pass chipc address instead of original core base */ - ai_scan(&sii->pub, cc); - } else { - /* Found chip of unknown type */ - return NULL; - } - /* no cores found, bail out */ - if (sii->numcores == 0) + if (socitype != SOCI_AI) return NULL; - /* bus/core/clk setup */ - origidx = SI_CC_IDX; - if (!ai_buscore_setup(sii, savewin, &origidx)) + SI_MSG("Found chip type AI (0x%08x)\n", w); + if (!ai_buscore_setup(sii, cc)) goto exit; /* Init nvram from sprom/otp if they exist */ - if (srom_var_init(&sii->pub, cc)) + if (srom_var_init(&sii->pub)) goto exit; ai_nvram_process(sii); /* === NVRAM, clock is ready === */ - cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0); - W_REG(&cc->gpiopullup, 0); - W_REG(&cc->gpiopulldown, 0); - ai_setcoreidx(sih, origidx); + bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0); + bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0); /* PMU specific initializations */ - if (sih->cccaps & CC_CAP_PMU) { - u32 xtalfreq; + if (ai_get_cccaps(sih) & CC_CAP_PMU) { si_pmu_init(sih); - si_pmu_chip_init(sih); - - xtalfreq = si_pmu_measure_alpclk(sih); - si_pmu_pll_init(sih, xtalfreq); + (void)si_pmu_measure_alpclk(sih); si_pmu_res_init(sih); - si_pmu_swreg_init(sih); } /* setup the GPIO based LED powersave register */ w = getintvar(sih, BRCMS_SROM_LEDDC); if (w == 0) w = DEFAULT_GPIOTIMERVAL; - ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval), - ~0, w); + ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval), + ~0, w); - if (PCIE(sii)) + if (PCIE(sih)) pcicore_attach(sii->pch, SI_DOATTACH); - if (sih->chip == BCM43224_CHIP_ID) { + if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) { /* * enable 12 mA drive strenth for 43224 and * set chipControl register bit 15 */ - if (sih->chiprev == 0) { - SI_MSG(("Applying 43224A0 WARs\n")); - ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol), - CCTRL43224_GPIO_TOGGLE, - CCTRL43224_GPIO_TOGGLE); + if (ai_get_chiprev(sih) == 0) { + SI_MSG("Applying 43224A0 WARs\n"); + ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol), + CCTRL43224_GPIO_TOGGLE, + CCTRL43224_GPIO_TOGGLE); si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE, CCTRL_43224A0_12MA_LED_DRIVE); } - if (sih->chiprev >= 1) { - SI_MSG(("Applying 43224B0+ WARs\n")); + if (ai_get_chiprev(sih) >= 1) { + SI_MSG("Applying 43224B0+ WARs\n"); si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE, CCTRL_43224B0_12MA_LED_DRIVE); } } - if (sih->chip == BCM4313_CHIP_ID) { + if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) { /* * enable 12 mA drive strenth for 4313 and * set chipControl register bit 1 */ - SI_MSG(("Applying 4313 WARs\n")); + SI_MSG("Applying 4313 WARs\n"); si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE, CCTRL_4313_12MA_LED_DRIVE); } @@ -1165,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii, } /* - * Allocate a si handle. - * devid - pci device id (used to determine chip#) - * osh - opaque OS handle - * regs - virtual address of initial core registers + * Allocate a si handle and do the attach. 
*/ struct si_pub * -ai_attach(void __iomem *regs, struct pci_dev *sdh) +ai_attach(struct bcma_bus *pbus) { struct si_info *sii; /* alloc struct si_info */ - sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC); + sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC); if (sii == NULL) return NULL; - if (ai_doattach(sii, regs, sdh) == NULL) { + if (ai_doattach(sii, pbus) == NULL) { kfree(sii); return NULL; } @@ -1209,292 +708,66 @@ void ai_detach(struct si_pub *sih) kfree(sii); } -/* register driver interrupt disabling and restoring callback functions */ -void -ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn, - void *intrsrestore_fn, - void *intrsenabled_fn, void *intr_arg) -{ - struct si_info *sii; - - sii = (struct si_info *)sih; - sii->intr_arg = intr_arg; - sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn; - sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn; - sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn; - /* save current core id. when this function called, the current core - * must be the core which provides driver functions(il, et, wl, etc.) - */ - sii->dev_coreid = sii->coreid[sii->curidx]; -} - -void ai_deregister_intr_callback(struct si_pub *sih) -{ - struct si_info *sii; - - sii = (struct si_info *)sih; - sii->intrsoff_fn = NULL; -} - -uint ai_coreid(struct si_pub *sih) -{ - struct si_info *sii; - - sii = (struct si_info *)sih; - return sii->coreid[sii->curidx]; -} - -uint ai_coreidx(struct si_pub *sih) -{ - struct si_info *sii; - - sii = (struct si_info *)sih; - return sii->curidx; -} - -bool ai_backplane64(struct si_pub *sih) -{ - return (sih->cccaps & CC_CAP_BKPLN64) != 0; -} - /* return index of coreid or BADIDX if not found */ -uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit) +struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit) { + struct bcma_device *core; struct si_info *sii; uint found; - uint i; sii = (struct si_info *)sih; found = 0; - for (i = 0; i < sii->numcores; i++) - if (sii->coreid[i] == coreid) { + list_for_each_entry(core, &sii->icbus->cores, list) + if (core->id.id == coreid) { if (found == coreunit) - return i; + return core; found++; } - return BADIDX; -} - -/* - * This function changes logical "focus" to the indicated core; - * must be called with interrupts off. - * Moreover, callers should keep interrupts off during switching - * out of and back to d11 core. - */ -void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit) -{ - uint idx; - - idx = ai_findcoreidx(sih, coreid, coreunit); - if (idx >= SI_MAXCORES) - return NULL; - - return ai_setcoreidx(sih, idx); -} - -/* Turn off interrupt as required by ai_setcore, before switch core */ -void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx, - uint *intr_val) -{ - void __iomem *cc; - struct si_info *sii; - - sii = (struct si_info *)sih; - - if (SI_FAST(sii)) { - /* Overloading the origidx variable to remember the coreid, - * this works because the core ids cannot be confused with - * core indices. 
- */ - *origidx = coreid; - if (coreid == CC_CORE_ID) - return CCREGS_FAST(sii); - else if (coreid == sih->buscoretype) - return PCIEREGS(sii); - } - INTR_OFF(sii, *intr_val); - *origidx = sii->curidx; - cc = ai_setcore(sih, coreid, 0); - return cc; -} - -/* restore coreidx and restore interrupt */ -void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val) -{ - struct si_info *sii; - - sii = (struct si_info *)sih; - if (SI_FAST(sii) - && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) - return; - - ai_setcoreidx(sih, coreid); - INTR_RESTORE(sii, intr_val); -} - -void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val) -{ - struct si_info *sii = (struct si_info *)sih; - u32 *w = (u32 *) sii->curwrap; - W_REG(w + (offset / 4), val); - return; + return NULL; } /* - * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set - * operation, switch back to the original core, and return the new value. - * - * When using the silicon backplane, no fiddling with interrupts or core - * switches is needed. - * - * Also, when using pci/pcie, we can optimize away the core switching for pci - * registers and (on newer pci cores) chipcommon registers. + * read/modify chipcommon core register. */ -uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, - uint val) +uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val) { - uint origidx = 0; - u32 __iomem *r = NULL; - uint w; - uint intr_val = 0; - bool fast = false; + struct bcma_device *cc; + u32 w; struct si_info *sii; sii = (struct si_info *)sih; - - if (coreidx >= SI_MAXCORES) - return 0; - - /* - * If pci/pcie, we can get at pci/pcie regs - * and on newer cores to chipc - */ - if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { - /* Chipc registers are mapped at 12KB */ - fast = true; - r = (u32 __iomem *)((__iomem char *)sii->curmap + - PCI_16KB0_CCREGS_OFFSET + regoff); - } else if (sii->pub.buscoreidx == coreidx) { - /* - * pci registers are at either in the last 2KB of - * an 8KB window or, in pcie and pci rev 13 at 8KB - */ - fast = true; - if (SI_FAST(sii)) - r = (u32 __iomem *)((__iomem char *)sii->curmap + - PCI_16KB0_PCIREGS_OFFSET + regoff); - else - r = (u32 __iomem *)((__iomem char *)sii->curmap + - ((regoff >= SBCONFIGOFF) ? 
- PCI_BAR0_PCISBR_OFFSET : - PCI_BAR0_PCIREGS_OFFSET) + regoff); - } - - if (!fast) { - INTR_OFF(sii, intr_val); - - /* save current core index */ - origidx = ai_coreidx(&sii->pub); - - /* switch core */ - r = (u32 __iomem *) ((unsigned char __iomem *) - ai_setcoreidx(&sii->pub, coreidx) + regoff); - } + cc = sii->icbus->drv_cc.core; /* mask and set */ if (mask || val) { - w = (R_REG(r) & ~mask) | val; - W_REG(r, w); + bcma_maskset32(cc, regoff, ~mask, val); } /* readback */ - w = R_REG(r); - - if (!fast) { - /* restore core index */ - if (origidx != coreidx) - ai_setcoreidx(&sii->pub, origidx); - - INTR_RESTORE(sii, intr_val); - } + w = bcma_read32(cc, regoff); return w; } -void ai_core_disable(struct si_pub *sih, u32 bits) -{ - struct si_info *sii; - u32 dummy; - struct aidmp *ai; - - sii = (struct si_info *)sih; - - ai = sii->curwrap; - - /* if core is already in reset, just return */ - if (R_REG(&ai->resetctrl) & AIRC_RESET) - return; - - W_REG(&ai->ioctrl, bits); - dummy = R_REG(&ai->ioctrl); - udelay(10); - - W_REG(&ai->resetctrl, AIRC_RESET); - udelay(1); -} - -/* reset and re-enable a core - * inputs: - * bits - core specific bits that are set during and after reset sequence - * resetbits - core specific bits that are set only during reset sequence - */ -void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits) -{ - struct si_info *sii; - struct aidmp *ai; - u32 dummy; - - sii = (struct si_info *)sih; - ai = sii->curwrap; - - /* - * Must do the disable sequence first to work - * for arbitrary current core state. - */ - ai_core_disable(sih, (bits | resetbits)); - - /* - * Now do the initialization sequence. - */ - W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); - dummy = R_REG(&ai->ioctrl); - W_REG(&ai->resetctrl, 0); - udelay(1); - - W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN)); - dummy = R_REG(&ai->ioctrl); - udelay(1); -} - /* return the slow clock source - LPO, XTAL, or PCI */ -static uint ai_slowclk_src(struct si_info *sii) +static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc) { - struct chipcregs __iomem *cc; + struct si_info *sii; u32 val; - if (sii->pub.ccrev < 6) { - pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, + sii = (struct si_info *)sih; + if (ai_get_ccrev(&sii->pub) < 6) { + pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &val); if (val & PCI_CFG_GPIO_SCS) return SCC_SS_PCI; return SCC_SS_XTAL; - } else if (sii->pub.ccrev < 10) { - cc = (struct chipcregs __iomem *) - ai_setcoreidx(&sii->pub, sii->curidx); - return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK; + } else if (ai_get_ccrev(&sii->pub) < 10) { + return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) & + SCC_SS_MASK; } else /* Insta-clock */ return SCC_SS_XTAL; } @@ -1503,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii) * return the ILP (slowclock) min or max frequency * precondition: we've established the chip has dynamic clk control */ -static uint ai_slowclk_freq(struct si_info *sii, bool max_freq, - struct chipcregs __iomem *cc) +static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq, + struct bcma_device *cc) { u32 slowclk; uint div; - slowclk = ai_slowclk_src(sii); - if (sii->pub.ccrev < 6) { + slowclk = ai_slowclk_src(sih, cc); + if (ai_get_ccrev(sih) < 6) { if (slowclk == SCC_SS_PCI) return max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64); else return max_freq ? 
(XTALMAXFREQ / 32) : (XTALMINFREQ / 32); - } else if (sii->pub.ccrev < 10) { + } else if (ai_get_ccrev(sih) < 10) { div = 4 * - (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> - SCC_CD_SHIFT) + 1); + (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) & + SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); if (slowclk == SCC_SS_LPO) return max_freq ? LPOMAXFREQ : LPOMINFREQ; else if (slowclk == SCC_SS_XTAL) @@ -1531,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq, : (PCIMINFREQ / div); } else { /* Chipc rev 10 is InstaClock */ - div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT; - div = 4 * (div + 1); + div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl)); + div = 4 * ((div >> SYCC_CD_SHIFT) + 1); return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div); } return 0; } static void -ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc) +ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc) { uint slowmaxfreq, pll_delay, slowclk; uint pll_on_delay, fref_sel_delay; @@ -1552,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc) * powered down by dynamic clk control logic. */ - slowclk = ai_slowclk_src(sii); + slowclk = ai_slowclk_src(sih, cc); if (slowclk != SCC_SS_XTAL) pll_delay += XTAL_ON_DELAY; /* Starting with 4318 it is ILP that is used for the delays */ slowmaxfreq = - ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc); + ai_slowclk_freq(sih, + (ai_get_ccrev(sih) >= 10) ? false : true, cc); pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; - W_REG(&cc->pll_on_delay, pll_on_delay); - W_REG(&cc->fref_sel_delay, fref_sel_delay); + bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay); + bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay); } /* initialize power control delay registers */ void ai_clkctl_init(struct si_pub *sih) { - struct si_info *sii; - uint origidx = 0; - struct chipcregs __iomem *cc; - bool fast; + struct bcma_device *cc; - if (!(sih->cccaps & CC_CAP_PWR_CTL)) + if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL)) return; - sii = (struct si_info *)sih; - fast = SI_FAST(sii); - if (!fast) { - origidx = sii->curidx; - cc = (struct chipcregs __iomem *) - ai_setcore(sih, CC_CORE_ID, 0); - if (cc == NULL) - return; - } else { - cc = (struct chipcregs __iomem *) CCREGS_FAST(sii); - if (cc == NULL) - return; - } + cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + if (cc == NULL) + return; /* set all Instaclk chip ILP to 1 MHz */ - if (sih->ccrev >= 10) - SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK, - (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); + if (ai_get_ccrev(sih) >= 10) + bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK, + (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); - ai_clkctl_setdelay(sii, cc); - - if (!fast) - ai_setcoreidx(sih, origidx); + ai_clkctl_setdelay(sih, cc); } /* @@ -1610,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih) u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih) { struct si_info *sii; - uint origidx = 0; - struct chipcregs __iomem *cc; + struct bcma_device *cc; uint slowminfreq; u16 fpdelay; - uint intr_val = 0; - bool fast; sii = (struct si_info *)sih; - if (sih->cccaps & CC_CAP_PMU) { - INTR_OFF(sii, intr_val); + if (ai_get_cccaps(sih) & CC_CAP_PMU) { fpdelay = si_pmu_fast_pwrup_delay(sih); - INTR_RESTORE(sii, intr_val); return fpdelay; } - if (!(sih->cccaps & CC_CAP_PWR_CTL)) + if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL)) return 0; - fast = SI_FAST(sii); fpdelay = 0; - if (!fast) { - origidx = 
sii->curidx; - INTR_OFF(sii, intr_val); - cc = (struct chipcregs __iomem *) - ai_setcore(sih, CC_CORE_ID, 0); - if (cc == NULL) - goto done; - } else { - cc = (struct chipcregs __iomem *) CCREGS_FAST(sii); - if (cc == NULL) - goto done; - } - - slowminfreq = ai_slowclk_freq(sii, false, cc); - fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) + - (slowminfreq - 1)) / slowminfreq; - - done: - if (!fast) { - ai_setcoreidx(sih, origidx); - INTR_RESTORE(sii, intr_val); + cc = ai_findcore(sih, CC_CORE_ID, 0); + if (cc) { + slowminfreq = ai_slowclk_freq(sih, false, cc); + fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2) + * 1000000) + (slowminfreq - 1)) / slowminfreq; } return fpdelay; } @@ -1664,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) sii = (struct si_info *)sih; /* pcie core doesn't have any mapping to control the xtal pu */ - if (PCIE(sii)) + if (PCIE(sih)) return -1; - pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in); - pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out); - pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen); + pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in); + pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out); + pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen); /* * Avoid glitching the clock if GPRS is already using it. @@ -1690,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) out |= PCI_CFG_GPIO_XTAL; if (what & PLL) out |= PCI_CFG_GPIO_PLL; - pci_write_config_dword(sii->pbus, + pci_write_config_dword(sii->pcibus, PCI_GPIO_OUT, out); - pci_write_config_dword(sii->pbus, + pci_write_config_dword(sii->pcibus, PCI_GPIO_OUTEN, outen); udelay(XTAL_ON_DELAY); } @@ -1700,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) /* turn pll on */ if (what & PLL) { out &= ~PCI_CFG_GPIO_PLL; - pci_write_config_dword(sii->pbus, + pci_write_config_dword(sii->pcibus, PCI_GPIO_OUT, out); mdelay(2); } @@ -1709,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) out &= ~PCI_CFG_GPIO_XTAL; if (what & PLL) out |= PCI_CFG_GPIO_PLL; - pci_write_config_dword(sii->pbus, + pci_write_config_dword(sii->pcibus, PCI_GPIO_OUT, out); - pci_write_config_dword(sii->pbus, + pci_write_config_dword(sii->pcibus, PCI_GPIO_OUTEN, outen); } @@ -1721,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) /* clk control mechanism through chipcommon, no policy checking */ static bool _ai_clkctl_cc(struct si_info *sii, uint mode) { - uint origidx = 0; - struct chipcregs __iomem *cc; + struct bcma_device *cc; u32 scc; - uint intr_val = 0; - bool fast = SI_FAST(sii); /* chipcommon cores prior to rev6 don't support dynamic clock control */ - if (sii->pub.ccrev < 6) + if (ai_get_ccrev(&sii->pub) < 6) return false; - if (!fast) { - INTR_OFF(sii, intr_val); - origidx = sii->curidx; - cc = (struct chipcregs __iomem *) - ai_setcore(&sii->pub, CC_CORE_ID, 0); - } else { - cc = (struct chipcregs __iomem *) CCREGS_FAST(sii); - if (cc == NULL) - goto done; - } + cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0); - if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20)) - goto done; + if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) && + (ai_get_ccrev(&sii->pub) < 20)) + return mode == CLK_FAST; switch (mode) { case CLK_FAST: /* FORCEHT, fast (pll) clock */ - if (sii->pub.ccrev < 10) { + if (ai_get_ccrev(&sii->pub) < 10) { /* * don't forget to force xtal back * on before we clear SCC_DYN_XTAL.. 
*/ ai_clkctl_xtal(&sii->pub, XTAL, ON); - SET_REG(&cc->slow_clk_ctl, - (SCC_XC | SCC_FS | SCC_IP), SCC_IP); - } else if (sii->pub.ccrev < 20) { - OR_REG(&cc->system_clk_ctl, SYCC_HR); + bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl), + (SCC_XC | SCC_FS | SCC_IP), SCC_IP); + } else if (ai_get_ccrev(&sii->pub) < 20) { + bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR); } else { - OR_REG(&cc->clk_ctl_st, CCS_FORCEHT); + bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT); } /* wait for the PLL */ - if (sii->pub.cccaps & CC_CAP_PMU) { + if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) { u32 htavail = CCS_HTAVAIL; - SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail) - == 0), PMU_MAX_TRANSITION_DLY); + SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) & + htavail) == 0), PMU_MAX_TRANSITION_DLY); } else { udelay(PLL_DELAY); } break; case CLK_DYNAMIC: /* enable dynamic clock control */ - if (sii->pub.ccrev < 10) { - scc = R_REG(&cc->slow_clk_ctl); + if (ai_get_ccrev(&sii->pub) < 10) { + scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)); scc &= ~(SCC_FS | SCC_IP | SCC_XC); if ((scc & SCC_SS_MASK) != SCC_SS_XTAL) scc |= SCC_XC; - W_REG(&cc->slow_clk_ctl, scc); + bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc); /* * for dynamic control, we have to @@ -1785,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode) */ if (scc & SCC_XC) ai_clkctl_xtal(&sii->pub, XTAL, OFF); - } else if (sii->pub.ccrev < 20) { + } else if (ai_get_ccrev(&sii->pub) < 20) { /* Instaclock */ - AND_REG(&cc->system_clk_ctl, ~SYCC_HR); + bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR); } else { - AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT); + bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT); } break; @@ -1797,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode) break; } - done: - if (!fast) { - ai_setcoreidx(&sii->pub, origidx); - INTR_RESTORE(sii, intr_val); - } return mode == CLK_FAST; } @@ -1820,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode) sii = (struct si_info *)sih; /* chipcommon cores prior to rev6 don't support dynamic clock control */ - if (sih->ccrev < 6) + if (ai_get_ccrev(sih) < 6) return false; - if (PCI_FORCEHT(sii)) + if (PCI_FORCEHT(sih)) return mode == CLK_FAST; return _ai_clkctl_cc(sii, mode); } -/* Build device path */ -int ai_devpath(struct si_pub *sih, char *path, int size) -{ - int slen; - - if (!path || size <= 0) - return -1; - - slen = snprintf(path, (size_t) size, "pci/%u/%u/", - ((struct si_info *)sih)->pbus->bus->number, - PCI_SLOT(((struct pci_dev *) - (((struct si_info *)(sih))->pbus))->devfn)); - - if (slen < 0 || slen >= size) { - path[0] = '\0'; - return -1; - } - - return 0; -} - void ai_pci_up(struct si_pub *sih) { struct si_info *sii; sii = (struct si_info *)sih; - if (PCI_FORCEHT(sii)) + if (PCI_FORCEHT(sih)) _ai_clkctl_cc(sii, CLK_FAST); - if (PCIE(sii)) + if (PCIE(sih)) pcicore_up(sii->pch, SI_PCIUP); } @@ -1882,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih) sii = (struct si_info *)sih; /* release FORCEHT since chip is going to "down" state */ - if (PCI_FORCEHT(sii)) + if (PCI_FORCEHT(sih)) _ai_clkctl_cc(sii, CLK_DYNAMIC); pcicore_down(sii->pch, SI_PCIDOWN); @@ -1895,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih) void ai_pci_setup(struct si_pub *sih, uint coremask) { struct si_info *sii; - struct sbpciregs __iomem *regs = NULL; - u32 siflag = 0, w; - uint idx = 0; + u32 w; sii = (struct si_info *)sih; - if (PCI(sii)) { - /* get current core index */ - idx = sii->curidx; - - /* we interrupt on this backplane flag number */ 
- siflag = ai_flag(sih); - - /* switch over to pci core */ - regs = ai_setcoreidx(sih, sii->pub.buscoreidx); - } - /* * Enable sb->pci interrupts. Assume * PCI rev 2.3 support was added in pci core rev 6 and things changed.. */ - if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) { + if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) { /* pci config write to set this core bit in PCIIntMask */ - pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w); + pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w); w |= (coremask << PCI_SBIM_SHIFT); - pci_write_config_dword(sii->pbus, PCI_INT_MASK, w); - } else { - /* set sbintvec bit for our flag number */ - ai_setint(sih, siflag); + pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w); } - if (PCI(sii)) { - pcicore_pci_setup(sii->pch, regs); - - /* switch back to previous core */ - ai_setcoreidx(sih, idx); + if (PCI(sih)) { + pcicore_pci_setup(sii->pch); } } @@ -1940,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask) */ int ai_pci_fixcfg(struct si_pub *sih) { - uint origidx; - void __iomem *regs = NULL; struct si_info *sii = (struct si_info *)sih; /* Fixup PI in SROM shadow area to enable the correct PCI core access */ - /* save the current index */ - origidx = ai_coreidx(&sii->pub); - /* check 'pi' is correct and fix it if not */ - regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0); - if (sii->pub.buscoretype == PCIE_CORE_ID) - pcicore_fixcfg_pcie(sii->pch, - (struct sbpcieregs __iomem *)regs); - else if (sii->pub.buscoretype == PCI_CORE_ID) - pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs); - - /* restore the original index */ - ai_setcoreidx(&sii->pub, origidx); - + pcicore_fixcfg(sii->pch); pcicore_hwup(sii->pch); return 0; } @@ -1969,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority) uint regoff; regoff = offsetof(struct chipcregs, gpiocontrol); - return ai_corereg(sih, SI_CC_IDX, regoff, mask, val); + return ai_cc_reg(sih, regoff, mask, val); } void ai_chipcontrl_epa4331(struct si_pub *sih, bool on) { - struct si_info *sii; - struct chipcregs __iomem *cc; - uint origidx; + struct bcma_device *cc; u32 val; - sii = (struct si_info *)sih; - origidx = ai_coreidx(sih); - - cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0); - - val = R_REG(&cc->chipcontrol); + cc = ai_findcore(sih, CC_CORE_ID, 0); if (on) { - if (sih->chippkg == 9 || sih->chippkg == 0xb) + if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb) /* Ext PA Controls for 4331 12x9 Package */ - W_REG(&cc->chipcontrol, val | - CCTRL4331_EXTPA_EN | - CCTRL4331_EXTPA_ON_GPIO2_5); + bcma_set32(cc, CHIPCREGOFFS(chipcontrol), + CCTRL4331_EXTPA_EN | + CCTRL4331_EXTPA_ON_GPIO2_5); else /* Ext PA Controls for 4331 12x12 Package */ - W_REG(&cc->chipcontrol, - val | CCTRL4331_EXTPA_EN); + bcma_set32(cc, CHIPCREGOFFS(chipcontrol), + CCTRL4331_EXTPA_EN); } else { val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); - W_REG(&cc->chipcontrol, val); + bcma_mask32(cc, CHIPCREGOFFS(chipcontrol), + ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5)); } - - ai_setcoreidx(sih, origidx); } /* Enable BT-COEX & Ex-PA for 4313 */ void ai_epa_4313war(struct si_pub *sih) { - struct si_info *sii; - struct chipcregs __iomem *cc; - uint origidx; + struct bcma_device *cc; - sii = (struct si_info *)sih; - origidx = ai_coreidx(sih); - - cc = ai_setcore(sih, CC_CORE_ID, 0); + cc = ai_findcore(sih, CC_CORE_ID, 0); /* EPA Fix */ - W_REG(&cc->gpiocontrol, - R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK); - - 
ai_setcoreidx(sih, origidx); + bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK); } /* check if the device is removed */ @@ -2031,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih) sii = (struct si_info *)sih; - pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w); + pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w); if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM) return true; @@ -2040,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih) bool ai_is_sprom_available(struct si_pub *sih) { - if (sih->ccrev >= 31) { - struct si_info *sii; - uint origidx; - struct chipcregs __iomem *cc; + struct si_info *sii = (struct si_info *)sih; + + if (ai_get_ccrev(sih) >= 31) { + struct bcma_device *cc; u32 sromctrl; - if ((sih->cccaps & CC_CAP_SROM) == 0) + if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0) return false; - sii = (struct si_info *)sih; - origidx = sii->curidx; - cc = ai_setcoreidx(sih, SI_CC_IDX); - sromctrl = R_REG(&cc->sromcontrol); - ai_setcoreidx(sih, origidx); + cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol)); return sromctrl & SRC_PRESENT; } - switch (sih->chip) { + switch (ai_get_chip_id(sih)) { case BCM4313_CHIP_ID: - return (sih->chipst & CST4313_SPROM_PRESENT) != 0; + return (sii->chipst & CST4313_SPROM_PRESENT) != 0; default: return true; } @@ -2067,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih) bool ai_is_otp_disabled(struct si_pub *sih) { - switch (sih->chip) { + struct si_info *sii = (struct si_info *)sih; + + switch (ai_get_chip_id(sih)) { case BCM4313_CHIP_ID: - return (sih->chipst & CST4313_OTP_PRESENT) == 0; + return (sii->chipst & CST4313_OTP_PRESENT) == 0; /* These chips always have their OTP on */ case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: @@ -2077,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih) return false; } } + +uint ai_get_buscoretype(struct si_pub *sih) +{ + struct si_info *sii = (struct si_info *)sih; + return sii->buscore->id.id; +} + +uint ai_get_buscorerev(struct si_pub *sih) +{ + struct si_info *sii = (struct si_info *)sih; + return sii->buscore->id.rev; +} diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h index 106a7424a7c..f84c6f78169 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h @@ -17,6 +17,8 @@ #ifndef _BRCM_AIUTILS_H_ #define _BRCM_AIUTILS_H_ +#include <linux/bcma/bcma.h> + #include "types.h" /* @@ -38,88 +40,12 @@ /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ #define SI_PCIE_DMA_H32 0x80000000 -/* core codes */ -#define NODEV_CORE_ID 0x700 /* Invalid coreid */ -#define CC_CORE_ID 0x800 /* chipcommon core */ -#define ILINE20_CORE_ID 0x801 /* iline20 core */ -#define SRAM_CORE_ID 0x802 /* sram core */ -#define SDRAM_CORE_ID 0x803 /* sdram core */ -#define PCI_CORE_ID 0x804 /* pci core */ -#define MIPS_CORE_ID 0x805 /* mips core */ -#define ENET_CORE_ID 0x806 /* enet mac core */ -#define CODEC_CORE_ID 0x807 /* v90 codec core */ -#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ -#define ADSL_CORE_ID 0x809 /* ADSL core */ -#define ILINE100_CORE_ID 0x80a /* iline100 core */ -#define IPSEC_CORE_ID 0x80b /* ipsec core */ -#define UTOPIA_CORE_ID 0x80c /* utopia core */ -#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ -#define SOCRAM_CORE_ID 0x80e /* internal memory core */ -#define MEMC_CORE_ID 0x80f /* memc sdram core */ -#define OFDM_CORE_ID 0x810 /* OFDM phy core */ -#define EXTIF_CORE_ID 
0x811 /* external interface core */ -#define D11_CORE_ID 0x812 /* 802.11 MAC core */ -#define APHY_CORE_ID 0x813 /* 802.11a phy core */ -#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ -#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ -#define MIPS33_CORE_ID 0x816 /* mips3302 core */ -#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ -#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ -#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ -#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ -#define SDIOH_CORE_ID 0x81b /* sdio host core */ -#define ROBO_CORE_ID 0x81c /* roboswitch core */ -#define ATA100_CORE_ID 0x81d /* parallel ATA core */ -#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ -#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ -#define PCIE_CORE_ID 0x820 /* pci express core */ -#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ -#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ -#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ -#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ -#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ -#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ -#define PMU_CORE_ID 0x827 /* PMU core */ -#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ -#define SDIOD_CORE_ID 0x829 /* SDIO device core */ -#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ -#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ -#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ -#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ -#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ -#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ -#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ -#define SC_CORE_ID 0x831 /* shared common core */ -#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ -#define SPIH_CORE_ID 0x833 /* SPI host core */ -#define I2S_CORE_ID 0x834 /* I2S core */ -#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ -#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ -#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ -#define DEF_AI_COMP 0xfff /* Default component, in ai chips it - * maps all unused address ranges - */ - /* chipcommon being the first core: */ #define SI_CC_IDX 0 /* SOC Interconnect types (aka chip types) */ #define SOCI_AI 1 -/* Common core control flags */ -#define SICF_BIST_EN 0x8000 -#define SICF_PME_EN 0x4000 -#define SICF_CORE_BITS 0x3ffc -#define SICF_FGC 0x0002 -#define SICF_CLOCK_EN 0x0001 - -/* Common core status flags */ -#define SISF_BIST_DONE 0x8000 -#define SISF_BIST_ERROR 0x4000 -#define SISF_GATED_CLK 0x2000 -#define SISF_DMA64 0x1000 -#define SISF_CORE_BITS 0x0fff - /* A register that is common to all cores to * communicate w/PMU regarding clock control. 
*/ @@ -220,26 +146,15 @@ * public (read-only) portion of aiutils handle returned by si_attach() */ struct si_pub { - uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ - uint buscorerev; /* buscore rev */ - uint buscoreidx; /* buscore index */ int ccrev; /* chip common core rev */ u32 cccaps; /* chip common capabilities */ - u32 cccaps_ext; /* chip common capabilities extension */ int pmurev; /* pmu core rev */ u32 pmucaps; /* pmu capabilities */ uint boardtype; /* board type */ uint boardvendor; /* board vendor */ - uint boardflags; /* board flags */ - uint boardflags2; /* board flags2 */ uint chip; /* chip number */ uint chiprev; /* chip revision */ uint chippkg; /* chip package option */ - u32 chipst; /* chip status */ - bool issim; /* chip is in simulation or emulation */ - uint socirev; /* SOC interconnect rev */ - bool pci_pr32414; - }; struct pci_dev; @@ -255,38 +170,13 @@ struct gpioh_item { /* misc si info needed by some of the routines */ struct si_info { struct si_pub pub; /* back plane public state (must be first) */ - struct pci_dev *pbus; /* handle to pci bus */ - uint dev_coreid; /* the core provides driver functions */ - void *intr_arg; /* interrupt callback function arg */ - u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */ - /* restore chip interrupts */ - void (*intrsrestore_fn) (void *intr_arg, u32 arg); - /* check if interrupts are enabled */ - bool (*intrsenabled_fn) (void *intr_arg); - + struct bcma_bus *icbus; /* handle to soc interconnect bus */ + struct pci_dev *pcibus; /* handle to pci bus */ struct pcicore_info *pch; /* PCI/E core handle */ - + struct bcma_device *buscore; struct list_head var_list; /* list of srom variables */ - void __iomem *curmap; /* current regs va */ - void __iomem *regs[SI_MAXCORES]; /* other regs va */ - - uint curidx; /* current core index */ - uint numcores; /* # discovered cores */ - uint coreid[SI_MAXCORES]; /* id of each core */ - u32 coresba[SI_MAXCORES]; /* backplane address of each core */ - void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */ - u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */ - u32 coresba_size[SI_MAXCORES]; /* backplane address space size */ - u32 coresba2_size[SI_MAXCORES]; /* second address space size */ - - void *curwrap; /* current wrapper va */ - void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ - u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ - - u32 cia[SI_MAXCORES]; /* erom cia entry for each core */ - u32 cib[SI_MAXCORES]; /* erom cia entry for each core */ - u32 oob_router; /* oob router registers for axi */ + u32 chipst; /* chip status */ }; /* @@ -299,52 +189,15 @@ struct si_info { /* AMBA Interconnect exported externs */ -extern uint ai_flag(struct si_pub *sih); -extern void ai_setint(struct si_pub *sih, int siflag); -extern uint ai_coreidx(struct si_pub *sih); -extern uint ai_corevendor(struct si_pub *sih); -extern uint ai_corerev(struct si_pub *sih); -extern bool ai_iscoreup(struct si_pub *sih); -extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val); -extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val); -extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val); -extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, - uint val); -extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits); -extern void ai_core_disable(struct si_pub *sih, u32 bits); -extern int ai_numaddrspaces(struct si_pub *sih); -extern u32 ai_addrspace(struct 
si_pub *sih, uint asidx); -extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx); -extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val); +extern struct bcma_device *ai_findcore(struct si_pub *sih, + u16 coreid, u16 coreunit); +extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val); /* === exported functions === */ -extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh); +extern struct si_pub *ai_attach(struct bcma_bus *pbus); extern void ai_detach(struct si_pub *sih); -extern uint ai_coreid(struct si_pub *sih); -extern uint ai_corerev(struct si_pub *sih); -extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, - uint val); -extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val); -extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val); -extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val); -extern bool ai_iscoreup(struct si_pub *sih); -extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit); -extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx); -extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit); -extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, - uint *origidx, uint *intr_val); -extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val); -extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits); -extern void ai_core_disable(struct si_pub *sih, u32 bits); -extern u32 ai_alp_clock(struct si_pub *sih); -extern u32 ai_ilp_clock(struct si_pub *sih); +extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val); extern void ai_pci_setup(struct si_pub *sih, uint coremask); -extern void ai_setint(struct si_pub *sih, int siflag); -extern bool ai_backplane64(struct si_pub *sih); -extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn, - void *intrsrestore_fn, - void *intrsenabled_fn, void *intr_arg); -extern void ai_deregister_intr_callback(struct si_pub *sih); extern void ai_clkctl_init(struct si_pub *sih); extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); extern bool ai_clkctl_cc(struct si_pub *sih, uint mode); @@ -359,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih); /* SPROM availability */ extern bool ai_is_sprom_available(struct si_pub *sih); -/* - * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. - * The returned path is NULL terminated and has trailing '/'. - * Return 0 on success, nonzero otherwise. 
- */ -extern int ai_devpath(struct si_pub *sih, char *path, int size); - extern void ai_pci_sleep(struct si_pub *sih); extern void ai_pci_down(struct si_pub *sih); extern void ai_pci_up(struct si_pub *sih); @@ -375,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on); /* Enable Ex-PA for 4313 */ extern void ai_epa_4313war(struct si_pub *sih); +extern uint ai_get_buscoretype(struct si_pub *sih); +extern uint ai_get_buscorerev(struct si_pub *sih); + +static inline int ai_get_ccrev(struct si_pub *sih) +{ + return sih->ccrev; +} + +static inline u32 ai_get_cccaps(struct si_pub *sih) +{ + return sih->cccaps; +} + +static inline int ai_get_pmurev(struct si_pub *sih) +{ + return sih->pmurev; +} + +static inline u32 ai_get_pmucaps(struct si_pub *sih) +{ + return sih->pmucaps; +} + +static inline uint ai_get_boardtype(struct si_pub *sih) +{ + return sih->boardtype; +} + +static inline uint ai_get_boardvendor(struct si_pub *sih) +{ + return sih->boardvendor; +} + +static inline uint ai_get_chip_id(struct si_pub *sih) +{ + return sih->chip; +} + +static inline uint ai_get_chiprev(struct si_pub *sih) +{ + return sih->chiprev; +} + +static inline uint ai_get_chippkg(struct si_pub *sih) +{ + return sih->chippkg; +} + #endif /* _BRCM_AIUTILS_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 7f27dbdb6b6..90911eec0cf 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, len = roundup(len, 4); ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN); - dma_len += (u16) brcmu_pkttotlen(p); + dma_len += (u16) p->len; BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d" " seg_cnt %d null delim %d\n", @@ -741,9 +741,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, if (p) { if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && ((u8) (p->priority) == tid)) { - - plen = brcmu_pkttotlen(p) + - AMPDU_MAX_MPDU_OVERHEAD; + plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; plen = max(scb_ampdu->min_len, plen); if ((plen + ampdu_len) > max_ampdu_bytes) { @@ -1120,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, u8 status_delay = 0; /* wait till the next 8 bytes of txstatus is available */ - while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) { + s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus)); + while ((s1 & TXS_V) == 0) { udelay(1); status_delay++; if (status_delay > 10) return; /* error condition */ + s1 = bcma_read32(wlc->hw->d11core, + D11REGOFFS(frmtxstatus)); } - s2 = R_REG(&wlc->regs->frmtxstatus2); + s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2)); } if (scb) { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 89ad1b7dab8..55e9f45fce2 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -1153,121 +1153,6 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, &txpwr); } -#ifdef POWER_DBG -static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr) -{ - int i; - char buf[80]; - char fraction[4][4] = { " ", ".25", ".5 ", ".75" }; - - sprintf(buf, "CCK "); - for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]); - 
printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "20 MHz OFDM SISO "); - for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "20 MHz OFDM CDD "); - for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "40 MHz OFDM SISO "); - for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->ofdm_40_siso[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "40 MHz OFDM CDD "); - for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->ofdm_40_cdd[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "20 MHz MCS0-7 SISO "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_20_siso[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "20 MHz MCS0-7 CDD "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_20_cdd[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "20 MHz MCS0-7 STBC "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_20_stbc[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "20 MHz MCS8-15 SDM "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_20_mimo[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "40 MHz MCS0-7 SISO "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_40_siso[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "40 MHz MCS0-7 CDD "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_40_cdd[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "40 MHz MCS0-7 STBC "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_40_stbc[i] % - BRCMS_TXPWR_DB_FACTOR]); - printk(KERN_DEBUG "%s\n", buf); - - sprintf(buf, "40 MHz MCS8-15 SDM "); - for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) - sprintf(buf[strlen(buf)], " %2d%s", - txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs_40_mimo[i] % - BRCMS_TXPWR_DB_FACTOR]); - } - printk(KERN_DEBUG "%s\n", buf); - - printk(KERN_DEBUG "MCS32 %2d%s\n", - txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR, - fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]); -} -#endif /* POWER_DBG */ - void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec, struct txpwr_limits *txpwr) 
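[Editor's aside on the hunk above: the POWER_DBG helper being deleted repeatedly called sprintf(buf[strlen(buf)], ...), which passes a char where sprintf expects a char *, and its last "40 MHz MCS8-15 SDM" block has a mismatched closing brace, so the dump could not have built cleanly with POWER_DBG defined. Below is a minimal user-space sketch of the append-and-print pattern that code was aiming for; print_rate_row() and DB_FACTOR are illustrative stand-ins, not part of the driver.]

#include <stdio.h>
#include <string.h>

#define DB_FACTOR 4	/* stands in for BRCMS_TXPWR_DB_FACTOR */

/* Append one row of quarter-dB power limits to a line buffer, then print it. */
static void print_rate_row(const char *label, const unsigned char *pwr, int n)
{
	static const char *fraction[4] = { "   ", ".25", ".5 ", ".75" };
	char buf[80];
	size_t len;

	snprintf(buf, sizeof(buf), "%-20s", label);
	for (int i = 0; i < n; i++) {
		len = strlen(buf);
		/* append at the end of buf (buf + len), not buf[strlen(buf)] */
		snprintf(buf + len, sizeof(buf) - len, " %2d%s",
			 pwr[i] / DB_FACTOR, fraction[pwr[i] % DB_FACTOR]);
	}
	printf("%s\n", buf);
}

int main(void)
{
	unsigned char cck[4] = { 68, 68, 66, 66 };

	print_rate_row("CCK", cck, 4);
	return 0;
}

[Using snprintf with the remaining buffer size also avoids the silent overflow that the fixed 80-byte sprintf buffer risked for the longer MCS rows.]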
@@ -1478,9 +1363,6 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec, txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i]; } -#ifdef POWER_DBG - wlc_phy_txpower_limits_dump(txpwr); -#endif return; } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h index ed51616abc8..1948cb2771e 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h @@ -430,6 +430,9 @@ struct d11regs { u16 PAD[0x380]; /* 0x800 - 0xEFE */ }; +/* d11 register field offset */ +#define D11REGOFFS(field) offsetof(struct d11regs, field) + #define PIHR_BASE 0x0400 /* byte address of packed IHR region */ /* biststatus */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 6ebec8f4284..2e90a9a16ed 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -13,8 +13,10 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/slab.h> -#include <linux/skbuff.h> #include <linux/delay.h> #include <linux/pci.h> @@ -22,6 +24,14 @@ #include <aiutils.h> #include "types.h" #include "dma.h" +#include "soc.h" + +/* + * dma register field offset calculation + */ +#define DMA64REGOFFS(field) offsetof(struct dma64regs, field) +#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field)) +#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field)) /* * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within @@ -168,26 +178,25 @@ /* debug/trace */ #ifdef BCMDBG -#define DMA_ERROR(args) \ - do { \ - if (!(*di->msg_level & 1)) \ - ; \ - else \ - printk args; \ - } while (0) -#define DMA_TRACE(args) \ - do { \ - if (!(*di->msg_level & 2)) \ - ; \ - else \ - printk args; \ - } while (0) +#define DMA_ERROR(fmt, ...) \ +do { \ + if (*di->msg_level & 1) \ + pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) +#define DMA_TRACE(fmt, ...) \ +do { \ + if (*di->msg_level & 2) \ + pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) #else -#define DMA_ERROR(args) -#define DMA_TRACE(args) +#define DMA_ERROR(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#define DMA_TRACE(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) #endif /* BCMDBG */ -#define DMA_NONE(args) +#define DMA_NONE(fmt, ...) 
\ + no_printk(fmt, ##__VA_ARGS__) #define MAXNAMEL 8 /* 8 char names */ @@ -218,15 +227,16 @@ struct dma_info { uint *msg_level; /* message level pointer */ char name[MAXNAMEL]; /* callers name for diag msgs */ - struct pci_dev *pbus; /* bus handle */ + struct bcma_device *core; + struct device *dmadev; bool dma64; /* this dma engine is operating in 64-bit mode */ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ /* 64-bit dma tx engine registers */ - struct dma64regs __iomem *d64txregs; + uint d64txregbase; /* 64-bit dma rx engine registers */ - struct dma64regs __iomem *d64rxregs; + uint d64rxregbase; /* pointer to dma64 tx descriptor ring */ struct dma64desc *txd64; /* pointer to dma64 rx descriptor ring */ @@ -361,7 +371,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) uint dmactrlflags; if (di == NULL) { - DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n")); + DMA_ERROR("NULL dma handle\n"); return 0; } @@ -373,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) if (dmactrlflags & DMA_CTRL_PEN) { u32 control; - control = R_REG(&di->d64txregs->control); - W_REG(&di->d64txregs->control, + control = bcma_read32(di->core, DMA64TXREGOFFS(di, control)); + bcma_write32(di->core, DMA64TXREGOFFS(di, control), control | D64_XC_PD); - if (R_REG(&di->d64txregs->control) & D64_XC_PD) + if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) & + D64_XC_PD) /* We *can* disable it so it is supported, * restore control register */ - W_REG(&di->d64txregs->control, - control); + bcma_write32(di->core, DMA64TXREGOFFS(di, control), + control); else /* Not supported, don't allow it to be enabled */ dmactrlflags &= ~DMA_CTRL_PEN; @@ -392,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) return dmactrlflags; } -static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) +static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset) { u32 w; - OR_REG(&dma64regs->control, D64_XC_AE); - w = R_REG(&dma64regs->control); - AND_REG(&dma64regs->control, ~D64_XC_AE); + bcma_set32(di->core, ctrl_offset, D64_XC_AE); + w = bcma_read32(di->core, ctrl_offset); + bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE); return (w & D64_XC_AE) == D64_XC_AE; } @@ -410,15 +421,15 @@ static bool _dma_isaddrext(struct dma_info *di) /* DMA64 supports full 32- or 64-bit operation. 
AE is always valid */ /* not all tx or rx channel are available */ - if (di->d64txregs != NULL) { - if (!_dma64_addrext(di->d64txregs)) - DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have " - "AE set\n", di->name)); + if (di->d64txregbase != 0) { + if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control))) + DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", + di->name); return true; - } else if (di->d64rxregs != NULL) { - if (!_dma64_addrext(di->d64rxregs)) - DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have " - "AE set\n", di->name)); + } else if (di->d64rxregbase != 0) { + if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control))) + DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", + di->name); return true; } @@ -430,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di) u32 addrl; /* Check to see if the descriptors need to be aligned on 4K/8K or not */ - if (di->d64txregs != NULL) { - W_REG(&di->d64txregs->addrlow, 0xff0); - addrl = R_REG(&di->d64txregs->addrlow); + if (di->d64txregbase != 0) { + bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0); + addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow)); if (addrl != 0) return false; - } else if (di->d64rxregs != NULL) { - W_REG(&di->d64rxregs->addrlow, 0xff0); - addrl = R_REG(&di->d64rxregs->addrlow); + } else if (di->d64rxregbase != 0) { + bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0); + addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow)); if (addrl != 0) return false; } @@ -448,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di) * Descriptor table must start at the DMA hardware dictated alignment, so * allocated memory must be large enough to support this requirement. */ -static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, +static void *dma_alloc_consistent(struct dma_info *di, uint size, u16 align_bits, uint *alloced, dma_addr_t *pap) { @@ -458,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, size += align; *alloced = size; } - return pci_alloc_consistent(pdev, size, pap); + return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC); } static @@ -484,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, u32 desc_strtaddr; u32 alignbytes = 1 << *alignbits; - va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa); + va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa); if (NULL == va) return NULL; @@ -493,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr & boundary)) { *alignbits = dma_align_sizetobits(size); - pci_free_consistent(di->pbus, size, va, *descpa); - va = dma_alloc_consistent(di->pbus, size, *alignbits, + dma_free_coherent(di->dmadev, size, va, *descpa); + va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa); } return va; @@ -519,8 +530,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction) va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, &alloced, &di->txdpaorig); if (va == NULL) { - DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)" - " failed\n", di->name)); + DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n", + di->name); return false; } align = (1 << align_bits); @@ -533,8 +544,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction) va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, &alloced, &di->rxdpaorig); if (va == NULL) { - DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)" - " failed\n", di->name)); + 
DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n", + di->name); return false; } align = (1 << align_bits); @@ -554,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction) } struct dma_pub *dma_attach(char *name, struct si_pub *sih, - void __iomem *dmaregstx, void __iomem *dmaregsrx, - uint ntxd, uint nrxd, - uint rxbufsize, int rxextheadroom, - uint nrxpost, uint rxoffset, uint *msg_level) + struct bcma_device *core, + uint txregbase, uint rxregbase, uint ntxd, uint nrxd, + uint rxbufsize, int rxextheadroom, + uint nrxpost, uint rxoffset, uint *msg_level) { struct dma_info *di; + u8 rev = core->id.rev; uint size; /* allocate private info structure */ @@ -570,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, di->msg_level = msg_level ? msg_level : &dma_msg_level; - di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); + di->dma64 = + ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64); - /* init dma reg pointer */ - di->d64txregs = (struct dma64regs __iomem *) dmaregstx; - di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; + /* init dma reg info */ + di->core = core; + di->d64txregbase = txregbase; + di->d64rxregbase = rxregbase; /* * Default flags (which can be changed by the driver calling @@ -583,17 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, */ _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); - DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d " - "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " - "dmaregstx %p dmaregsrx %p\n", name, "DMA64", - di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, - rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx)); + DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d " + "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " + "txregbase %u rxregbase %u\n", name, "DMA64", + di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, + rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase); /* make a private copy of our callers name */ strncpy(di->name, name, MAXNAMEL); di->name[MAXNAMEL - 1] = '\0'; - di->pbus = ((struct si_info *)sih)->pbus; + di->dmadev = core->dma_dev; /* save tunables */ di->ntxd = (u16) ntxd; @@ -625,12 +639,12 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, di->dataoffsetlow = di->ddoffsetlow; di->dataoffsethigh = di->ddoffsethigh; /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. 
*/ - if ((ai_coreid(sih) == SDIOD_CORE_ID) - && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2))) - di->addrext = 0; - else if ((ai_coreid(sih) == I2S_CORE_ID) && - ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1))) - di->addrext = 0; + if ((core->id.id == SDIOD_CORE_ID) + && ((rev > 0) && (rev <= 2))) + di->addrext = false; + else if ((core->id.id == I2S_CORE_ID) && + ((rev == 0) || (rev == 1))) + di->addrext = false; else di->addrext = _dma_isaddrext(di); @@ -645,8 +659,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, di->dmadesc_align = 4; /* 16 byte alignment */ } - DMA_NONE(("DMA descriptor align_needed %d, align %d\n", - di->aligndesc_4k, di->dmadesc_align)); + DMA_NONE("DMA descriptor align_needed %d, align %d\n", + di->aligndesc_4k, di->dmadesc_align); /* allocate tx packet pointer vector */ if (ntxd) { @@ -684,21 +698,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, if ((di->ddoffsetlow != 0) && !di->addrext) { if (di->txdpa > SI_PCI_DMA_SZ) { - DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not " - "supported\n", di->name, (u32)di->txdpa)); + DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n", + di->name, (u32)di->txdpa); goto fail; } if (di->rxdpa > SI_PCI_DMA_SZ) { - DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not " - "supported\n", di->name, (u32)di->rxdpa)); + DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n", + di->name, (u32)di->rxdpa); goto fail; } } - DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x " - "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, - di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, - di->addrext)); + DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n", + di->ddoffsetlow, di->ddoffsethigh, + di->dataoffsetlow, di->dataoffsethigh, + di->addrext); return (struct dma_pub *) di; @@ -744,17 +758,17 @@ void dma_detach(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE(("%s: dma_detach\n", di->name)); + DMA_TRACE("%s:\n", di->name); /* free dma descriptor rings */ if (di->txd64) - pci_free_consistent(di->pbus, di->txdalloc, - ((s8 *)di->txd64 - di->txdalign), - (di->txdpaorig)); + dma_free_coherent(di->dmadev, di->txdalloc, + ((s8 *)di->txd64 - di->txdalign), + (di->txdpaorig)); if (di->rxd64) - pci_free_consistent(di->pbus, di->rxdalloc, - ((s8 *)di->rxd64 - di->rxdalign), - (di->rxdpaorig)); + dma_free_coherent(di->dmadev, di->rxdalloc, + ((s8 *)di->rxd64 - di->rxdalign), + (di->rxdpaorig)); /* free packet pointer vectors */ kfree(di->txp); @@ -779,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) if ((di->ddoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) { if (direction == DMA_TX) { - W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); - W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); + bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), + pa + di->ddoffsetlow); + bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh), + di->ddoffsethigh); } else { - W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); - W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); + bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), + pa + di->ddoffsetlow); + bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh), + di->ddoffsethigh); } } else { /* DMA64 32bits address extension */ @@ -794,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) pa &= ~PCI32ADDR_HIGH; if (direction == DMA_TX) { - W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); - 
W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); - SET_REG(&di->d64txregs->control, - D64_XC_AE, (ae << D64_XC_AE_SHIFT)); + bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), + pa + di->ddoffsetlow); + bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh), + di->ddoffsethigh); + bcma_maskset32(di->core, DMA64TXREGOFFS(di, control), + D64_XC_AE, (ae << D64_XC_AE_SHIFT)); } else { - W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); - W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); - SET_REG(&di->d64rxregs->control, - D64_RC_AE, (ae << D64_RC_AE_SHIFT)); + bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), + pa + di->ddoffsetlow); + bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh), + di->ddoffsethigh); + bcma_maskset32(di->core, DMA64RXREGOFFS(di, control), + D64_RC_AE, (ae << D64_RC_AE_SHIFT)); } } } @@ -812,11 +834,11 @@ static void _dma_rxenable(struct dma_info *di) uint dmactrlflags = di->dma.dmactrlflags; u32 control; - DMA_TRACE(("%s: dma_rxenable\n", di->name)); + DMA_TRACE("%s:\n", di->name); - control = - (R_REG(&di->d64rxregs->control) & D64_RC_AE) | - D64_RC_RE; + control = D64_RC_RE | (bcma_read32(di->core, + DMA64RXREGOFFS(di, control)) & + D64_RC_AE); if ((dmactrlflags & DMA_CTRL_PEN) == 0) control |= D64_RC_PD; @@ -824,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di) if (dmactrlflags & DMA_CTRL_ROC) control |= D64_RC_OC; - W_REG(&di->d64rxregs->control, + bcma_write32(di->core, DMA64RXREGOFFS(di, control), ((di->rxoffset << D64_RC_RO_SHIFT) | control)); } @@ -832,7 +854,7 @@ void dma_rxinit(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE(("%s: dma_rxinit\n", di->name)); + DMA_TRACE("%s:\n", di->name); if (di->nrxd == 0) return; @@ -867,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) return NULL; curr = - B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - + B2I(((bcma_read32(di->core, + DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); /* ignore curr if forceall */ @@ -881,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow; /* clear this packet from the descriptor ring */ - pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE); + dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE); di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef); di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef); @@ -901,7 +924,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) /* * !! rx entry routine - * returns a pointer to the next frame received, or NULL if there are no more + * returns the number packages in the next frame, or 0 if there are no more * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is * supported with pkts chain * otherwise, it's treated as giant pkt and will be tossed. @@ -909,74 +932,83 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) * buffer data. 
After it reaches the max size of buffer, the data continues * in next DMA descriptor buffer WITHOUT DMA header */ -struct sk_buff *dma_rx(struct dma_pub *pub) +int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list) { struct dma_info *di = (struct dma_info *)pub; - struct sk_buff *p, *head, *tail; + struct sk_buff_head dma_frames; + struct sk_buff *p, *next; uint len; uint pkt_len; int resid = 0; + int pktcnt = 1; + skb_queue_head_init(&dma_frames); next_frame: - head = _dma_getnextrxp(di, false); - if (head == NULL) - return NULL; + p = _dma_getnextrxp(di, false); + if (p == NULL) + return 0; - len = le16_to_cpu(*(__le16 *) (head->data)); - DMA_TRACE(("%s: dma_rx len %d\n", di->name, len)); - dma_spin_for_len(len, head); + len = le16_to_cpu(*(__le16 *) (p->data)); + DMA_TRACE("%s: dma_rx len %d\n", di->name, len); + dma_spin_for_len(len, p); /* set actual length */ pkt_len = min((di->rxoffset + len), di->rxbufsize); - __skb_trim(head, pkt_len); + __skb_trim(p, pkt_len); + skb_queue_tail(&dma_frames, p); resid = len - (di->rxbufsize - di->rxoffset); /* check for single or multi-buffer rx */ if (resid > 0) { - tail = head; while ((resid > 0) && (p = _dma_getnextrxp(di, false))) { - tail->next = p; pkt_len = min_t(uint, resid, di->rxbufsize); __skb_trim(p, pkt_len); - - tail = p; + skb_queue_tail(&dma_frames, p); resid -= di->rxbufsize; + pktcnt++; } #ifdef BCMDBG if (resid > 0) { uint cur; cur = - B2I(((R_REG(&di->d64rxregs->status0) & - D64_RS0_CD_MASK) - - di->rcvptrbase) & D64_RS0_CD_MASK, - struct dma64desc); - DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n", - di->rxin, di->rxout, cur)); + B2I(((bcma_read32(di->core, + DMA64RXREGOFFS(di, status0)) & + D64_RS0_CD_MASK) - di->rcvptrbase) & + D64_RS0_CD_MASK, struct dma64desc); + DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", + di->rxin, di->rxout, cur); } #endif /* BCMDBG */ if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { - DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", - di->name, len)); - brcmu_pkt_buf_free_skb(head); + DMA_ERROR("%s: bad frame length (%d)\n", + di->name, len); + skb_queue_walk_safe(&dma_frames, p, next) { + skb_unlink(p, &dma_frames); + brcmu_pkt_buf_free_skb(p); + } di->dma.rxgiants++; + pktcnt = 1; goto next_frame; } } - return head; + skb_queue_splice_tail(&dma_frames, skb_list); + return pktcnt; } static bool dma64_rxidle(struct dma_info *di) { - DMA_TRACE(("%s: dma_rxidle\n", di->name)); + DMA_TRACE("%s:\n", di->name); if (di->nrxd == 0) return true; - return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == - (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); + return ((bcma_read32(di->core, + DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) == + (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) & + D64_RS0_CD_MASK)); } /* @@ -1010,7 +1042,7 @@ bool dma_rxfill(struct dma_pub *pub) n = di->nrxpost - nrxdactive(di, rxin, rxout); - DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n)); + DMA_TRACE("%s: post %d\n", di->name, n); if (di->rxbufsize > BCMEXTRAHDROOM) extra_offset = di->rxextrahdrroom; @@ -1023,11 +1055,9 @@ bool dma_rxfill(struct dma_pub *pub) p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); if (p == NULL) { - DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", - di->name)); + DMA_ERROR("%s: out of rxbufs\n", di->name); if (i == 0 && dma64_rxidle(di)) { - DMA_ERROR(("%s: rxfill64: ring is empty !\n", - di->name)); + DMA_ERROR("%s: ring is empty !\n", di->name); ring_empty = true; } di->dma.rxnobuf++; @@ -1042,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub) */ *(u32 *) (p->data) = 
0; - pa = pci_map_single(di->pbus, p->data, - di->rxbufsize, PCI_DMA_FROMDEVICE); + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, + DMA_FROM_DEVICE); /* save the free packet pointer */ di->rxp[rxout] = p; @@ -1061,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub) di->rxout = rxout; /* update the chip lastdscr pointer */ - W_REG(&di->d64rxregs->ptr, + bcma_write32(di->core, DMA64RXREGOFFS(di, ptr), di->rcvptrbase + I2B(rxout, struct dma64desc)); return ring_empty; @@ -1072,7 +1102,7 @@ void dma_rxreclaim(struct dma_pub *pub) struct dma_info *di = (struct dma_info *)pub; struct sk_buff *p; - DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); + DMA_TRACE("%s:\n", di->name); while ((p = _dma_getnextrxp(di, true))) brcmu_pkt_buf_free_skb(p); @@ -1103,7 +1133,7 @@ void dma_txinit(struct dma_pub *pub) struct dma_info *di = (struct dma_info *)pub; u32 control = D64_XC_XE; - DMA_TRACE(("%s: dma_txinit\n", di->name)); + DMA_TRACE("%s:\n", di->name); if (di->ntxd == 0) return; @@ -1122,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub) if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) control |= D64_XC_PD; - OR_REG(&di->d64txregs->control, control); + bcma_set32(di->core, DMA64TXREGOFFS(di, control), control); /* DMA engine with alignment requirement requires table to be inited * before enabling the engine @@ -1135,24 +1165,24 @@ void dma_txsuspend(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE(("%s: dma_txsuspend\n", di->name)); + DMA_TRACE("%s:\n", di->name); if (di->ntxd == 0) return; - OR_REG(&di->d64txregs->control, D64_XC_SE); + bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE); } void dma_txresume(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE(("%s: dma_txresume\n", di->name)); + DMA_TRACE("%s:\n", di->name); if (di->ntxd == 0) return; - AND_REG(&di->d64txregs->control, ~D64_XC_SE); + bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE); } bool dma_txsuspended(struct dma_pub *pub) @@ -1160,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub) struct dma_info *di = (struct dma_info *)pub; return (di->ntxd == 0) || - ((R_REG(&di->d64txregs->control) & D64_XC_SE) == - D64_XC_SE); + ((bcma_read32(di->core, + DMA64TXREGOFFS(di, control)) & D64_XC_SE) == + D64_XC_SE); } void dma_txreclaim(struct dma_pub *pub, enum txd_range range) @@ -1169,11 +1200,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range) struct dma_info *di = (struct dma_info *)pub; struct sk_buff *p; - DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, - (range == DMA_RANGE_ALL) ? "all" : - ((range == - DMA_RANGE_TRANSMITTED) ? "transmitted" : - "transferred"))); + DMA_TRACE("%s: %s\n", + di->name, + range == DMA_RANGE_ALL ? "all" : + range == DMA_RANGE_TRANSMITTED ? 
"transmitted" : + "transferred"); if (di->txin == di->txout) return; @@ -1194,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub) return true; /* suspend tx DMA first */ - W_REG(&di->d64txregs->control, D64_XC_SE); + bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE); SPINWAIT(((status = - (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) - != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) - && (status != D64_XS0_XS_STOPPED), 10000); + (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) & + D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) && + (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED), + 10000); - W_REG(&di->d64txregs->control, 0); + bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0); SPINWAIT(((status = - (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) - != D64_XS0_XS_DISABLED), 10000); + (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) & + D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000); /* wait for the last transaction to complete */ udelay(300); @@ -1219,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub) if (di->nrxd == 0) return true; - W_REG(&di->d64rxregs->control, 0); + bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0); SPINWAIT(((status = - (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) - != D64_RS0_RS_DISABLED), 10000); + (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) & + D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000); return status == D64_RS0_RS_DISABLED; } @@ -1233,72 +1265,58 @@ bool dma_rxreset(struct dma_pub *pub) * the error(toss frames) could be fatal and cause many subsequent hard * to debug problems */ -int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit) +int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) { struct dma_info *di = (struct dma_info *)pub; - struct sk_buff *p, *next; unsigned char *data; uint len; u16 txout; u32 flags = 0; dma_addr_t pa; - DMA_TRACE(("%s: dma_txfast\n", di->name)); + DMA_TRACE("%s:\n", di->name); txout = di->txout; /* - * Walk the chain of packet buffers - * allocating and initializing transmit descriptor entries. + * obtain and initialize transmit descriptor entry. */ - for (p = p0; p; p = next) { - data = p->data; - len = p->len; - next = p->next; + data = p->data; + len = p->len; - /* return nonzero if out of tx descriptors */ - if (nexttxd(di, txout) == di->txin) - goto outoftxd; + /* no use to transmit a zero length packet */ + if (len == 0) + return 0; - if (len == 0) - continue; + /* return nonzero if out of tx descriptors */ + if (nexttxd(di, txout) == di->txin) + goto outoftxd; - /* get physical address of buffer start */ - pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE); + /* get physical address of buffer start */ + pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); - flags = 0; - if (p == p0) - flags |= D64_CTRL1_SOF; - - /* With a DMA segment list, Descriptor table is filled - * using the segment list instead of looping over - * buffers in multi-chain DMA. Therefore, EOF for SGLIST - * is when end of segment list is reached. - */ - if (next == NULL) - flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF); - if (txout == (di->ntxd - 1)) - flags |= D64_CTRL1_EOT; - - dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); + /* With a DMA segment list, Descriptor table is filled + * using the segment list instead of looping over + * buffers in multi-chain DMA. Therefore, EOF for SGLIST + * is when end of segment list is reached. 
+ */ + flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF; + if (txout == (di->ntxd - 1)) + flags |= D64_CTRL1_EOT; - txout = nexttxd(di, txout); - } + dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); - /* if last txd eof not set, fix it */ - if (!(flags & D64_CTRL1_EOF)) - di->txd64[prevtxd(di, txout)].ctrl1 = - cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF); + txout = nexttxd(di, txout); /* save the packet */ - di->txp[prevtxd(di, txout)] = p0; + di->txp[prevtxd(di, txout)] = p; /* bump the tx descriptor index */ di->txout = txout; /* kick the chip */ if (commit) - W_REG(&di->d64txregs->ptr, + bcma_write32(di->core, DMA64TXREGOFFS(di, ptr), di->xmtptrbase + I2B(txout, struct dma64desc)); /* tx flow control */ @@ -1307,8 +1325,8 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit) return 0; outoftxd: - DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); - brcmu_pkt_buf_free_skb(p0); + DMA_ERROR("%s: out of txds !!!\n", di->name); + brcmu_pkt_buf_free_skb(p); di->dma.txavail = 0; di->dma.txnobuf++; return -1; @@ -1331,11 +1349,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) u16 active_desc; struct sk_buff *txp; - DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, - (range == DMA_RANGE_ALL) ? "all" : - ((range == - DMA_RANGE_TRANSMITTED) ? "transmitted" : - "transferred"))); + DMA_TRACE("%s: %s\n", + di->name, + range == DMA_RANGE_ALL ? "all" : + range == DMA_RANGE_TRANSMITTED ? "transmitted" : + "transferred"); if (di->ntxd == 0) return NULL; @@ -1346,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) if (range == DMA_RANGE_ALL) end = di->txout; else { - struct dma64regs __iomem *dregs = di->d64txregs; - - end = (u16) (B2I(((R_REG(&dregs->status0) & - D64_XS0_CD_MASK) - - di->xmtptrbase) & D64_XS0_CD_MASK, - struct dma64desc)); + end = (u16) (B2I(((bcma_read32(di->core, + DMA64TXREGOFFS(di, status0)) & + D64_XS0_CD_MASK) - di->xmtptrbase) & + D64_XS0_CD_MASK, struct dma64desc)); if (range == DMA_RANGE_TRANSFERED) { active_desc = - (u16) (R_REG(&dregs->status1) & + (u16)(bcma_read32(di->core, + DMA64TXREGOFFS(di, status1)) & D64_XS1_AD_MASK); active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; @@ -1384,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) txp = di->txp[i]; di->txp[i] = NULL; - pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE); + dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE); } di->txin = i; @@ -1395,8 +1412,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) return txp; bogus: - DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d " - "force %d\n", start, end, di->txout, forceall)); + DMA_NONE("bogus curr: start %d end %d txout %d\n", + start, end, di->txout); return NULL; } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h index ebc5bc546f3..cc269ee5c49 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h @@ -18,6 +18,7 @@ #define _BRCM_DMA_H_ #include <linux/delay.h> +#include <linux/skbuff.h> #include "types.h" /* forward structure declarations */ /* map/unmap direction */ @@ -74,13 +75,14 @@ struct dma_pub { }; extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, - void __iomem *dmaregstx, void __iomem *dmaregsrx, - uint ntxd, uint nrxd, - uint rxbufsize, int rxextheadroom, - uint nrxpost, uint rxoffset, uint *msg_level); + struct bcma_device 
*d11core, + uint txregbase, uint rxregbase, + uint ntxd, uint nrxd, + uint rxbufsize, int rxextheadroom, + uint nrxpost, uint rxoffset, uint *msg_level); void dma_rxinit(struct dma_pub *pub); -struct sk_buff *dma_rx(struct dma_pub *pub); +int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list); bool dma_rxfill(struct dma_pub *pub); bool dma_rxreset(struct dma_pub *pub); bool dma_txreset(struct dma_pub *pub); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 0d8a9cdf897..d106576ce33 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -17,11 +17,11 @@ #define __UNDEF_NO_VERSION__ #include <linux/etherdevice.h> -#include <linux/pci.h> #include <linux/sched.h> #include <linux/firmware.h> #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/bcma/bcma.h> #include <net/mac80211.h> #include <defs.h> #include "nicpci.h" @@ -40,10 +40,10 @@ #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ FIF_ALLMULTI | \ FIF_FCSFAIL | \ - FIF_PLCPFAIL | \ FIF_CONTROL | \ FIF_OTHER_BSS | \ - FIF_BCN_PRBRESP_PROMISC) + FIF_BCN_PRBRESP_PROMISC | \ + FIF_PSPOLL) #define CHAN2GHZ(channel, freqency, chflags) { \ .band = IEEE80211_BAND_2GHZ, \ @@ -87,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); -/* recognized PCI IDs */ -static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = { - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */ - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */ - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */ - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */ - {0} -}; -MODULE_DEVICE_TABLE(pci, brcms_pci_id_table); +/* recognized BCMA Core IDs */ +static struct bcma_device_id brcms_coreid_table[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS), + BCMA_CORETABLE_END +}; +MODULE_DEVICE_TABLE(bcma, brcms_coreid_table); #ifdef BCMDBG static int msglevel = 0xdeadbeef; @@ -216,8 +214,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = { .ht_cap = { /* from include/linux/ieee80211.h */ .cap = IEEE80211_HT_CAP_GRN_FLD | - IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT, + IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, .ampdu_density = AMPDU_DEF_MPDU_DENSITY, @@ -238,8 +235,7 @@ static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = { BRCMS_LEGACY_5G_RATE_OFFSET, .ht_cap = { .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */ + IEEE80211_HT_CAP_SGI_40, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, .ampdu_density = AMPDU_DEF_MPDU_DENSITY, @@ -287,6 +283,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw) { struct brcms_info *wl = hw->priv; bool blocked; + int err; ieee80211_wake_queues(hw); spin_lock_bh(&wl->lock); @@ -295,57 +292,69 @@ static int brcms_ops_start(struct ieee80211_hw *hw) if (!blocked) wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); - return 0; + spin_lock_bh(&wl->lock); + /* avoid acknowledging frames before a non-monitor device is added */ + wl->mute_tx = true; + + if (!wl->pub->up) + 
err = brcms_up(wl); + else + err = -ENODEV; + spin_unlock_bh(&wl->lock); + + if (err != 0) + wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__, + err); + return err; } static void brcms_ops_stop(struct ieee80211_hw *hw) { + struct brcms_info *wl = hw->priv; + int status; + ieee80211_stop_queues(hw); + + if (wl->wlc == NULL) + return; + + spin_lock_bh(&wl->lock); + status = brcms_c_chipmatch(wl->wlc->hw->vendorid, + wl->wlc->hw->deviceid); + spin_unlock_bh(&wl->lock); + if (!status) { + wiphy_err(wl->wiphy, + "wl: brcms_ops_stop: chipmatch failed\n"); + return; + } + + /* put driver in down state */ + spin_lock_bh(&wl->lock); + brcms_down(wl); + spin_unlock_bh(&wl->lock); } static int brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct brcms_info *wl; - int err; + struct brcms_info *wl = hw->priv; /* Just STA for now */ - if (vif->type != NL80211_IFTYPE_AP && - vif->type != NL80211_IFTYPE_MESH_POINT && - vif->type != NL80211_IFTYPE_STATION && - vif->type != NL80211_IFTYPE_WDS && - vif->type != NL80211_IFTYPE_ADHOC) { + if (vif->type != NL80211_IFTYPE_STATION) { wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only" " STA for now\n", __func__, vif->type); return -EOPNOTSUPP; } - wl = hw->priv; - spin_lock_bh(&wl->lock); - if (!wl->pub->up) - err = brcms_up(wl); - else - err = -ENODEV; - spin_unlock_bh(&wl->lock); + wl->mute_tx = false; + brcms_c_mute(wl->wlc, false); - if (err != 0) - wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__, - err); - - return err; + return 0; } static void brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct brcms_info *wl; - - wl = hw->priv; - - /* put driver in down state */ - spin_lock_bh(&wl->lock); - brcms_down(wl); - spin_unlock_bh(&wl->lock); } static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) @@ -362,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) conf->listen_interval); } if (changed & IEEE80211_CONF_CHANGE_MONITOR) - wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n", + wiphy_dbg(wiphy, "%s: change monitor mode: %s\n", __func__, conf->flags & IEEE80211_CONF_MONITOR ? 
"true" : "false"); if (changed & IEEE80211_CONF_CHANGE_PS) @@ -539,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw, changed_flags &= MAC_FILTERS; *total_flags &= MAC_FILTERS; + if (changed_flags & FIF_PROMISC_IN_BSS) - wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n"); + wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n"); if (changed_flags & FIF_ALLMULTI) - wiphy_err(wiphy, "FIF_ALLMULTI\n"); + wiphy_dbg(wiphy, "FIF_ALLMULTI\n"); if (changed_flags & FIF_FCSFAIL) - wiphy_err(wiphy, "FIF_FCSFAIL\n"); - if (changed_flags & FIF_PLCPFAIL) - wiphy_err(wiphy, "FIF_PLCPFAIL\n"); + wiphy_dbg(wiphy, "FIF_FCSFAIL\n"); if (changed_flags & FIF_CONTROL) - wiphy_err(wiphy, "FIF_CONTROL\n"); + wiphy_dbg(wiphy, "FIF_CONTROL\n"); if (changed_flags & FIF_OTHER_BSS) - wiphy_err(wiphy, "FIF_OTHER_BSS\n"); - if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { - spin_lock_bh(&wl->lock); - if (*total_flags & FIF_BCN_PRBRESP_PROMISC) { - wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS; - brcms_c_mac_bcn_promisc_change(wl->wlc, 1); - } else { - brcms_c_mac_bcn_promisc_change(wl->wlc, 0); - wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS; - } - spin_unlock_bh(&wl->lock); - } + wiphy_dbg(wiphy, "FIF_OTHER_BSS\n"); + if (changed_flags & FIF_PSPOLL) + wiphy_dbg(wiphy, "FIF_PSPOLL\n"); + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) + wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n"); + + spin_lock_bh(&wl->lock); + brcms_c_mac_promisc(wl->wlc, *total_flags); + spin_unlock_bh(&wl->lock); return; } @@ -609,13 +614,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, wl->pub->global_ampdu->scb = scb; wl->pub->global_ampdu->max_pdu = 16; - sta->ht_cap.ht_supported = true; - sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY; - sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD | - IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT; - /* * minstrel_ht initiates addBA on our behalf by calling * ieee80211_start_tx_ba_session() @@ -724,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = { }; /* - * is called in brcms_pci_probe() context, therefore no locking required. + * is called in brcms_bcma_probe() context, therefore no locking required. */ static int brcms_set_hint(struct brcms_info *wl, char *abbrev) { @@ -864,56 +862,26 @@ static void brcms_free(struct brcms_info *wl) #endif kfree(t); } - - /* - * unregister_netdev() calls get_stats() which may read chip - * registers so we cannot unmap the chip registers until - * after calling unregister_netdev() . - */ - if (wl->regsva) - iounmap(wl->regsva); - - wl->regsva = NULL; } /* -* called from both kernel as from this kernel module. +* called from both kernel as from this kernel module (error flow on attach) * precondition: perimeter lock is not acquired. 
*/ -static void brcms_remove(struct pci_dev *pdev) +static void brcms_remove(struct bcma_device *pdev) { - struct brcms_info *wl; - struct ieee80211_hw *hw; - int status; - - hw = pci_get_drvdata(pdev); - wl = hw->priv; - if (!wl) { - pr_err("wl: brcms_remove: pci_get_drvdata failed\n"); - return; - } + struct ieee80211_hw *hw = bcma_get_drvdata(pdev); + struct brcms_info *wl = hw->priv; - spin_lock_bh(&wl->lock); - status = brcms_c_chipmatch(pdev->vendor, pdev->device); - spin_unlock_bh(&wl->lock); - if (!status) { - wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch " - "failed\n"); - return; - } if (wl->wlc) { wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); ieee80211_unregister_hw(hw); - spin_lock_bh(&wl->lock); - brcms_down(wl); - spin_unlock_bh(&wl->lock); } - pci_disable_device(pdev); brcms_free(wl); - pci_set_drvdata(pdev, NULL); + bcma_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); } @@ -1021,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw) * it as static. * * - * is called in brcms_pci_probe() context, therefore no locking required. + * is called in brcms_bcma_probe() context, therefore no locking required. */ -static struct brcms_info *brcms_attach(u16 vendor, u16 device, - resource_size_t regs, - struct pci_dev *btparam, uint irq) +static struct brcms_info *brcms_attach(struct bcma_device *pdev) { struct brcms_info *wl = NULL; int unit, err; @@ -1039,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device, return NULL; /* allocate private info */ - hw = pci_get_drvdata(btparam); /* btparam == pdev */ + hw = bcma_get_drvdata(pdev); if (hw != NULL) wl = hw->priv; if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL)) @@ -1051,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device, /* setup the bottom half handler */ tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); - wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ); - if (wl->regsva == NULL) { - wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit); - goto fail; - } spin_lock_init(&wl->lock); spin_lock_init(&wl->isr_lock); /* prepare ucode */ - if (brcms_request_fw(wl, btparam) < 0) { + if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) { wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); brcms_release_fw(wl); - brcms_remove(btparam); + brcms_remove(pdev); return NULL; } /* common load-time initialization */ - wl->wlc = brcms_c_attach(wl, vendor, device, unit, false, - wl->regsva, btparam, &err); + wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); brcms_release_fw(wl); if (!wl->wlc) { wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", @@ -1081,15 +1041,13 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device, wl->pub->ieee_hw = hw; - /* disable mpc */ - brcms_c_set_radio_mpc(wl->wlc, false); - /* register our interrupt handler */ - if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) { + if (request_irq(pdev->bus->host_pci->irq, brcms_isr, + IRQF_SHARED, KBUILD_MODNAME, wl)) { wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit); goto fail; } - wl->irq = irq; + wl->irq = pdev->bus->host_pci->irq; /* register module */ brcms_c_module_register(wl->pub, "linux", wl, NULL); @@ -1136,38 +1094,19 @@ fail: * * Perimeter lock is initialized in the course of this function. 
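brcms_attach() now takes everything from the bcma core: driver data through bcma_get_drvdata() and, since bcma does not yet expose an interrupt abstraction of its own, the IRQ of the PCI device hosting the bus. A minimal sketch of the latter (hypothetical helper, not part of the patch):

#include <linux/bcma/bcma.h>
#include <linux/interrupt.h>

static int example_request_core_irq(struct bcma_device *core,
                                    irq_handler_t isr, void *ctx)
{
        /* the 802.11 core has no IRQ of its own; borrow the host PCI one */
        return request_irq(core->bus->host_pci->irq, isr, IRQF_SHARED,
                           KBUILD_MODNAME, ctx);
}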
*/ -static int __devinit -brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit brcms_bcma_probe(struct bcma_device *pdev) { - int rc; struct brcms_info *wl; struct ieee80211_hw *hw; - u32 val; - dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n", - pdev->bus->number, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), pdev->irq); + dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n", + pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class, + pdev->bus->host_pci->irq); - if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) || - ((pdev->device != 0x0576) && - ((pdev->device & 0xff00) != 0x4300) && - ((pdev->device & 0xff00) != 0x4700) && - ((pdev->device < 43000) || (pdev->device > 43999)))) + if ((pdev->id.manuf != BCMA_MANUF_BCM) || + (pdev->id.id != BCMA_CORE_80211)) return -ENODEV; - rc = pci_enable_device(pdev); - if (rc) { - pr_err("%s: Cannot enable device %d-%d_%d\n", - __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - return -ENODEV; - } - pci_set_master(pdev); - - pci_read_config_dword(pdev, 0x40, &val); - if ((val & 0x0000ff00) != 0) - pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops); if (!hw) { pr_err("%s: ieee80211_alloc_hw failed\n", __func__); @@ -1176,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_IEEE80211_DEV(hw, &pdev->dev); - pci_set_drvdata(pdev, hw); + bcma_set_drvdata(pdev, hw); memset(hw->priv, 0, sizeof(*wl)); - wl = brcms_attach(pdev->vendor, pdev->device, - pci_resource_start(pdev, 0), pdev, - pdev->irq); - + wl = brcms_attach(pdev); if (!wl) { pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME, __func__); @@ -1192,16 +1128,23 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } -static int brcms_suspend(struct pci_dev *pdev, pm_message_t state) +static int brcms_pci_suspend(struct pci_dev *pdev) +{ + pci_save_state(pdev); + pci_disable_device(pdev); + return pci_set_power_state(pdev, PCI_D3hot); +} + +static int brcms_suspend(struct bcma_device *pdev, pm_message_t state) { struct brcms_info *wl; struct ieee80211_hw *hw; - hw = pci_get_drvdata(pdev); + hw = bcma_get_drvdata(pdev); wl = hw->priv; if (!wl) { wiphy_err(wl->wiphy, - "brcms_suspend: pci_get_drvdata failed\n"); + "brcms_suspend: bcma_get_drvdata failed\n"); return -ENODEV; } @@ -1210,25 +1153,14 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state) wl->pub->hw_up = false; spin_unlock_bh(&wl->lock); - pci_save_state(pdev); - pci_disable_device(pdev); - return pci_set_power_state(pdev, PCI_D3hot); + /* temporarily do suspend ourselves */ + return brcms_pci_suspend(pdev->bus->host_pci); } -static int brcms_resume(struct pci_dev *pdev) +static int brcms_pci_resume(struct pci_dev *pdev) { - struct brcms_info *wl; - struct ieee80211_hw *hw; int err = 0; - u32 val; - - hw = pci_get_drvdata(pdev); - wl = hw->priv; - if (!wl) { - wiphy_err(wl->wiphy, - "wl: brcms_resume: pci_get_drvdata failed\n"); - return -ENODEV; - } + uint val; err = pci_set_power_state(pdev, PCI_D0); if (err) @@ -1246,24 +1178,28 @@ static int brcms_resume(struct pci_dev *pdev) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + return 0; +} + +static int brcms_resume(struct bcma_device *pdev) +{ /* - * done. driver will be put in up state - * in brcms_ops_add_interface() call. + * just do pci resume for now until bcma supports it. 
*/ - return err; + return brcms_pci_resume(pdev->bus->host_pci); } -static struct pci_driver brcms_pci_driver = { +static struct bcma_driver brcms_bcma_driver = { .name = KBUILD_MODNAME, - .probe = brcms_pci_probe, + .probe = brcms_bcma_probe, .suspend = brcms_suspend, .resume = brcms_resume, .remove = __devexit_p(brcms_remove), - .id_table = brcms_pci_id_table, + .id_table = brcms_coreid_table, }; /** - * This is the main entry point for the WL driver. + * This is the main entry point for the brcmsmac driver. * * This function determines if a device pointed to by pdev is a WL device, * and if so, performs a brcms_attach() on it. @@ -1278,26 +1214,24 @@ static int __init brcms_module_init(void) brcm_msg_level = msglevel; #endif /* BCMDBG */ - error = pci_register_driver(&brcms_pci_driver); + error = bcma_driver_register(&brcms_bcma_driver); + printk(KERN_ERR "%s: register returned %d\n", __func__, error); if (!error) return 0; - - return error; } /** - * This function unloads the WL driver from the system. + * This function unloads the brcmsmac driver from the system. * - * This function unconditionally unloads the WL driver module from the + * This function unconditionally unloads the brcmsmac driver module from the * system. * */ static void __exit brcms_module_exit(void) { - pci_unregister_driver(&brcms_pci_driver); - + bcma_driver_unregister(&brcms_bcma_driver); } module_init(brcms_module_init); @@ -1319,8 +1253,7 @@ void brcms_init(struct brcms_info *wl) { BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit); brcms_reset(wl); - - brcms_c_init(wl->wlc); + brcms_c_init(wl->wlc, wl->mute_tx); } /* @@ -1332,11 +1265,19 @@ uint brcms_reset(struct brcms_info *wl) brcms_c_reset(wl->wlc); /* dpc will not be rescheduled */ - wl->resched = 0; + wl->resched = false; return 0; } +void brcms_fatal_error(struct brcms_info *wl) +{ + wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n", + wl->wlc->pub->unit); + brcms_reset(wl); + ieee80211_restart_hw(wl->pub->ieee_hw); +} + /* * These are interrupt on/off entry points. Disable interrupts * during interrupt state transition. @@ -1561,11 +1502,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) if (le32_to_cpu(hdr->idx) == idx) { pdata = wl->fw.fw_bin[i]->data + le32_to_cpu(hdr->offset); - *pbuf = kmalloc(len, GFP_ATOMIC); + *pbuf = kmemdup(pdata, len, GFP_ATOMIC); if (*pbuf == NULL) goto fail; - memcpy(*pbuf, pdata, len); return 0; } } @@ -1578,7 +1518,7 @@ fail: } /* - * Precondition: Since this function is called in brcms_pci_probe() context, + * Precondition: Since this function is called in brcms_bcma_probe() context, * no locking is required. */ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx) @@ -1618,7 +1558,7 @@ void brcms_ucode_free_buf(void *p) /* * checks validity of all firmware images loaded from user space * - * Precondition: Since this function is called in brcms_pci_probe() context, + * Precondition: Since this function is called in brcms_bcma_probe() context, * no locking is required. 
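The registration pieces spread over the hunks above reduce to the usual bus-driver boilerplate, now against bcma rather than PCI. A condensed, self-contained sketch with illustrative names (suspend/resume handlers omitted):

#include <linux/module.h>
#include <linux/bcma/bcma.h>

static struct bcma_device_id example_id_table[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, BCMA_ANY_REV, BCMA_ANY_CLASS),
        BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, example_id_table);

static int __devinit example_probe(struct bcma_device *core)
{
        return 0;	/* allocate ieee80211_hw, attach, register */
}

static void __devexit example_remove(struct bcma_device *core)
{
}

static struct bcma_driver example_driver = {
        .name		= KBUILD_MODNAME,
        .id_table	= example_id_table,
        .probe		= example_probe,
        .remove		= __devexit_p(example_remove),
};

static int __init example_init(void)
{
        return bcma_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
        bcma_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);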
*/ int brcms_check_firmwares(struct brcms_info *wl) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h index 177f0e44e4b..8f60419c37b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h @@ -68,8 +68,6 @@ struct brcms_info { spinlock_t lock; /* per-device perimeter lock */ spinlock_t isr_lock; /* per-device ISR synchronization lock */ - /* regsva for unmap in brcms_free() */ - void __iomem *regsva; /* opaque chip registers virtual address */ /* timer related fields */ atomic_t callbacks; /* # outstanding callback functions */ @@ -80,6 +78,7 @@ struct brcms_info { struct brcms_firmware fw; struct wiphy *wiphy; struct brcms_ucode ucode; + bool mute_tx; }; /* misc callbacks */ @@ -104,5 +103,6 @@ extern bool brcms_del_timer(struct brcms_timer *timer); extern void brcms_msleep(struct brcms_info *wl, uint ms); extern void brcms_dpc(unsigned long data); extern void brcms_timer(struct brcms_timer *t); +extern void brcms_fatal_error(struct brcms_info *wl); #endif /* _BRCM_MAC80211_IF_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 510e9bb5228..f7ed34034f8 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -30,44 +30,21 @@ #include "mac80211_if.h" #include "ucode_loader.h" #include "main.h" +#include "soc.h" /* * Indication for txflowcontrol that all priority bits in * TXQ_STOP_FOR_PRIOFC_MASK are to be considered. */ -#define ALLPRIO -1 - -/* - * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL. - */ -#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1) +#define ALLPRIO -1 /* watchdog timer, in unit of ms */ -#define TIMER_INTERVAL_WATCHDOG 1000 +#define TIMER_INTERVAL_WATCHDOG 1000 /* radio monitor timer, in unit of ms */ -#define TIMER_INTERVAL_RADIOCHK 800 - -/* Max MPC timeout, in unit of watchdog */ -#ifndef BRCMS_MPC_MAX_DELAYCNT -#define BRCMS_MPC_MAX_DELAYCNT 10 -#endif - -/* Min MPC timeout, in unit of watchdog */ -#define BRCMS_MPC_MIN_DELAYCNT 1 -#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */ +#define TIMER_INTERVAL_RADIOCHK 800 /* beacon interval, in unit of 1024TU */ -#define BEACON_INTERVAL_DEFAULT 100 -/* DTIM interval, in unit of beacon interval */ -#define DTIM_INTERVAL_DEFAULT 3 - -/* Scale down delays to accommodate QT slow speed */ -/* beacon interval, in unit of 1024TU */ -#define BEACON_INTERVAL_DEF_QT 20 -/* DTIM interval, in unit of beacon interval */ -#define DTIM_INTERVAL_DEF_QT 1 - -#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */ +#define BEACON_INTERVAL_DEFAULT 100 /* n-mode support capability */ /* 2x2 includes both 1x1 & 2x2 devices @@ -78,113 +55,71 @@ #define WL_11N_3x3 3 #define WL_11N_4x4 4 -/* define 11n feature disable flags */ -#define WLFEATURE_DISABLE_11N 0x00000001 -#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 -#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 -#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 -#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 -#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 -#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 -#define WLFEATURE_DISABLE_11N_GF 0x00000080 - -#define EDCF_ACI_MASK 0x60 -#define EDCF_ACI_SHIFT 5 -#define EDCF_ECWMIN_MASK 0x0f -#define EDCF_ECWMAX_SHIFT 4 -#define EDCF_AIFSN_MASK 0x0f -#define EDCF_AIFSN_MAX 15 -#define EDCF_ECWMAX_MASK 0xf0 - -#define 
EDCF_AC_BE_TXOP_STA 0x0000 -#define EDCF_AC_BK_TXOP_STA 0x0000 -#define EDCF_AC_VO_ACI_STA 0x62 -#define EDCF_AC_VO_ECW_STA 0x32 -#define EDCF_AC_VI_ACI_STA 0x42 -#define EDCF_AC_VI_ECW_STA 0x43 -#define EDCF_AC_BK_ECW_STA 0xA4 -#define EDCF_AC_VI_TXOP_STA 0x005e -#define EDCF_AC_VO_TXOP_STA 0x002f -#define EDCF_AC_BE_ACI_STA 0x03 -#define EDCF_AC_BE_ECW_STA 0xA4 -#define EDCF_AC_BK_ACI_STA 0x27 -#define EDCF_AC_VO_TXOP_AP 0x002f - -#define EDCF_TXOP2USEC(txop) ((txop) << 5) -#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) - -#define APHY_SYMBOL_TIME 4 -#define APHY_PREAMBLE_TIME 16 -#define APHY_SIGNAL_TIME 4 -#define APHY_SIFS_TIME 16 -#define APHY_SERVICE_NBITS 16 -#define APHY_TAIL_NBITS 6 -#define BPHY_SIFS_TIME 10 -#define BPHY_PLCP_SHORT_TIME 96 - -#define PREN_PREAMBLE 24 -#define PREN_MM_EXT 12 -#define PREN_PREAMBLE_EXT 4 +#define EDCF_ACI_MASK 0x60 +#define EDCF_ACI_SHIFT 5 +#define EDCF_ECWMIN_MASK 0x0f +#define EDCF_ECWMAX_SHIFT 4 +#define EDCF_AIFSN_MASK 0x0f +#define EDCF_AIFSN_MAX 15 +#define EDCF_ECWMAX_MASK 0xf0 + +#define EDCF_AC_BE_TXOP_STA 0x0000 +#define EDCF_AC_BK_TXOP_STA 0x0000 +#define EDCF_AC_VO_ACI_STA 0x62 +#define EDCF_AC_VO_ECW_STA 0x32 +#define EDCF_AC_VI_ACI_STA 0x42 +#define EDCF_AC_VI_ECW_STA 0x43 +#define EDCF_AC_BK_ECW_STA 0xA4 +#define EDCF_AC_VI_TXOP_STA 0x005e +#define EDCF_AC_VO_TXOP_STA 0x002f +#define EDCF_AC_BE_ACI_STA 0x03 +#define EDCF_AC_BE_ECW_STA 0xA4 +#define EDCF_AC_BK_ACI_STA 0x27 +#define EDCF_AC_VO_TXOP_AP 0x002f + +#define EDCF_TXOP2USEC(txop) ((txop) << 5) +#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) + +#define APHY_SYMBOL_TIME 4 +#define APHY_PREAMBLE_TIME 16 +#define APHY_SIGNAL_TIME 4 +#define APHY_SIFS_TIME 16 +#define APHY_SERVICE_NBITS 16 +#define APHY_TAIL_NBITS 6 +#define BPHY_SIFS_TIME 10 +#define BPHY_PLCP_SHORT_TIME 96 + +#define PREN_PREAMBLE 24 +#define PREN_MM_EXT 12 +#define PREN_PREAMBLE_EXT 4 #define DOT11_MAC_HDR_LEN 24 -#define DOT11_ACK_LEN 10 -#define DOT11_BA_LEN 4 +#define DOT11_ACK_LEN 10 +#define DOT11_BA_LEN 4 #define DOT11_OFDM_SIGNAL_EXTENSION 6 #define DOT11_MIN_FRAG_LEN 256 -#define DOT11_RTS_LEN 16 -#define DOT11_CTS_LEN 10 +#define DOT11_RTS_LEN 16 +#define DOT11_CTS_LEN 10 #define DOT11_BA_BITMAP_LEN 128 #define DOT11_MIN_BEACON_PERIOD 1 #define DOT11_MAX_BEACON_PERIOD 0xFFFF -#define DOT11_MAXNUMFRAGS 16 +#define DOT11_MAXNUMFRAGS 16 #define DOT11_MAX_FRAG_LEN 2346 -#define BPHY_PLCP_TIME 192 -#define RIFS_11N_TIME 2 - -#define WME_VER 1 -#define WME_SUBTYPE_PARAM_IE 1 -#define WME_TYPE 2 -#define WME_OUI "\x00\x50\xf2" +#define BPHY_PLCP_TIME 192 +#define RIFS_11N_TIME 2 -#define AC_BE 0 -#define AC_BK 1 -#define AC_VI 2 -#define AC_VO 3 - -#define BCN_TMPL_LEN 512 /* length of the BCN template area */ +/* length of the BCN template area */ +#define BCN_TMPL_LEN 512 /* brcms_bss_info flag bit values */ -#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */ - -/* Flags used in brcms_c_txq_info.stopped */ -/* per prio flow control bits */ -#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF -/* stop txq enqueue for packet drain */ -#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100 -/* stop txq enqueue for ampdu flow control */ -#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200 - -#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */ +#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */ -/* Find basic rate for a given rate */ -static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec) -{ - if (is_mcs_rate(rspec)) - return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK] - .leg_ofdm]; - return 
wlc->band->basic_rate[rspec & RSPEC_RATE_MASK]; -} - -static u16 frametype(u32 rspec, u8 mimoframe) -{ - if (is_mcs_rate(rspec)) - return mimoframe; - return is_cck_rate(rspec) ? FT_CCK : FT_OFDM; -} +/* chip rx buffer offset */ +#define BRCMS_HWRXOFF 38 /* rfdisable delay timer 500 ms, runs of ALP clock */ -#define RFDISABLE_DEFAULT 10000000 +#define RFDISABLE_DEFAULT 10000000 #define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */ @@ -194,87 +129,83 @@ static u16 frametype(u32 rspec, u8 mimoframe) * These constants are used ONLY by wlc_prio2prec_map. Do not use them * elsewhere. */ -#define _BRCMS_PREC_NONE 0 /* None = - */ -#define _BRCMS_PREC_BK 2 /* BK - Background */ -#define _BRCMS_PREC_BE 4 /* BE - Best-effort */ -#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */ -#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */ -#define _BRCMS_PREC_VI 10 /* Vi - Video */ -#define _BRCMS_PREC_VO 12 /* Vo - Voice */ -#define _BRCMS_PREC_NC 14 /* NC - Network Control */ - -/* The BSS is generating beacons in HW */ -#define BRCMS_BSSCFG_HW_BCN 0x20 - -#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */ -#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us */ -#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us */ -#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */ - -#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */ - -#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */ +#define _BRCMS_PREC_NONE 0 /* None = - */ +#define _BRCMS_PREC_BK 2 /* BK - Background */ +#define _BRCMS_PREC_BE 4 /* BE - Best-effort */ +#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */ +#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */ +#define _BRCMS_PREC_VI 10 /* Vi - Video */ +#define _BRCMS_PREC_VO 12 /* Vo - Voice */ +#define _BRCMS_PREC_NC 14 /* NC - Network Control */ + +/* synthpu_dly times in us */ +#define SYNTHPU_DLY_APHY_US 3700 +#define SYNTHPU_DLY_BPHY_US 1050 +#define SYNTHPU_DLY_NPHY_US 2048 +#define SYNTHPU_DLY_LPPHY_US 300 + +#define ANTCNT 10 /* vanilla M_MAX_ANTCNT val */ /* Per-AC retry limit register definitions; uses defs.h bitfield macros */ -#define EDCF_SHORT_S 0 -#define EDCF_SFB_S 4 -#define EDCF_LONG_S 8 -#define EDCF_LFB_S 12 -#define EDCF_SHORT_M BITFIELD_MASK(4) -#define EDCF_SFB_M BITFIELD_MASK(4) -#define EDCF_LONG_M BITFIELD_MASK(4) -#define EDCF_LFB_M BITFIELD_MASK(4) +#define EDCF_SHORT_S 0 +#define EDCF_SFB_S 4 +#define EDCF_LONG_S 8 +#define EDCF_LFB_S 12 +#define EDCF_SHORT_M BITFIELD_MASK(4) +#define EDCF_SFB_M BITFIELD_MASK(4) +#define EDCF_LONG_M BITFIELD_MASK(4) +#define EDCF_LFB_M BITFIELD_MASK(4) -#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */ -#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */ -#define RETRY_LONG_DEF 4 /* Default Long retry count */ -#define RETRY_SHORT_FB 3 /* Short count for fallback rate */ -#define RETRY_LONG_FB 2 /* Long count for fallback rate */ +#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */ +#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */ +#define RETRY_LONG_DEF 4 /* Default Long retry count */ +#define RETRY_SHORT_FB 3 /* Short count for fb rate */ +#define RETRY_LONG_FB 2 /* Long count for fb rate */ -#define APHY_CWMIN 15 -#define PHY_CWMAX 1023 +#define APHY_CWMIN 15 +#define PHY_CWMAX 1023 -#define EDCF_AIFSN_MIN 1 +#define EDCF_AIFSN_MIN 1 -#define FRAGNUM_MASK 0xF +#define FRAGNUM_MASK 0xF -#define APHY_SLOT_TIME 9 -#define BPHY_SLOT_TIME 20 +#define APHY_SLOT_TIME 9 +#define BPHY_SLOT_TIME 20 -#define 
WL_SPURAVOID_OFF 0 -#define WL_SPURAVOID_ON1 1 -#define WL_SPURAVOID_ON2 2 +#define WL_SPURAVOID_OFF 0 +#define WL_SPURAVOID_ON1 1 +#define WL_SPURAVOID_ON2 2 /* invalid core flags, use the saved coreflags */ -#define BRCMS_USE_COREFLAGS 0xffffffff +#define BRCMS_USE_COREFLAGS 0xffffffff /* values for PLCPHdr_override */ -#define BRCMS_PLCP_AUTO -1 -#define BRCMS_PLCP_SHORT 0 -#define BRCMS_PLCP_LONG 1 +#define BRCMS_PLCP_AUTO -1 +#define BRCMS_PLCP_SHORT 0 +#define BRCMS_PLCP_LONG 1 /* values for g_protection_override and n_protection_override */ #define BRCMS_PROTECTION_AUTO -1 #define BRCMS_PROTECTION_OFF 0 #define BRCMS_PROTECTION_ON 1 #define BRCMS_PROTECTION_MMHDR_ONLY 2 -#define BRCMS_PROTECTION_CTS_ONLY 3 +#define BRCMS_PROTECTION_CTS_ONLY 3 /* values for g_protection_control and n_protection_control */ -#define BRCMS_PROTECTION_CTL_OFF 0 +#define BRCMS_PROTECTION_CTL_OFF 0 #define BRCMS_PROTECTION_CTL_LOCAL 1 #define BRCMS_PROTECTION_CTL_OVERLAP 2 /* values for n_protection */ #define BRCMS_N_PROTECTION_OFF 0 #define BRCMS_N_PROTECTION_OPTIONAL 1 -#define BRCMS_N_PROTECTION_20IN40 2 +#define BRCMS_N_PROTECTION_20IN40 2 #define BRCMS_N_PROTECTION_MIXEDMODE 3 /* values for band specific 40MHz capabilities */ -#define BRCMS_N_BW_20ALL 0 -#define BRCMS_N_BW_40ALL 1 -#define BRCMS_N_BW_20IN2G_40IN5G 2 +#define BRCMS_N_BW_20ALL 0 +#define BRCMS_N_BW_40ALL 1 +#define BRCMS_N_BW_20IN2G_40IN5G 2 /* bitflags for SGI support (sgi_rx iovar) */ #define BRCMS_N_SGI_20 0x01 @@ -282,48 +213,42 @@ static u16 frametype(u32 rspec, u8 mimoframe) /* defines used by the nrate iovar */ /* MSC in use,indicates b0-6 holds an mcs */ -#define NRATE_MCS_INUSE 0x00000080 +#define NRATE_MCS_INUSE 0x00000080 /* rate/mcs value */ -#define NRATE_RATE_MASK 0x0000007f +#define NRATE_RATE_MASK 0x0000007f /* stf mode mask: siso, cdd, stbc, sdm */ -#define NRATE_STF_MASK 0x0000ff00 +#define NRATE_STF_MASK 0x0000ff00 /* stf mode shift */ -#define NRATE_STF_SHIFT 8 -/* bit indicates override both rate & mode */ -#define NRATE_OVERRIDE 0x80000000 +#define NRATE_STF_SHIFT 8 /* bit indicate to override mcs only */ -#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 -#define NRATE_SGI_MASK 0x00800000 /* sgi mode */ -#define NRATE_SGI_SHIFT 23 /* sgi mode */ -#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */ -#define NRATE_LDPC_SHIFT 22 /* ldpc shift */ +#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 +#define NRATE_SGI_MASK 0x00800000 /* sgi mode */ +#define NRATE_SGI_SHIFT 23 /* sgi mode */ +#define NRATE_LDPC_CODING 0x00400000 /* adv coding in use */ +#define NRATE_LDPC_SHIFT 22 /* ldpc shift */ -#define NRATE_STF_SISO 0 /* stf mode SISO */ -#define NRATE_STF_CDD 1 /* stf mode CDD */ -#define NRATE_STF_STBC 2 /* stf mode STBC */ -#define NRATE_STF_SDM 3 /* stf mode SDM */ +#define NRATE_STF_SISO 0 /* stf mode SISO */ +#define NRATE_STF_CDD 1 /* stf mode CDD */ +#define NRATE_STF_STBC 2 /* stf mode STBC */ +#define NRATE_STF_SDM 3 /* stf mode SDM */ -#define MAX_DMA_SEGS 4 +#define MAX_DMA_SEGS 4 /* Max # of entries in Tx FIFO based on 4kb page size */ -#define NTXD 256 +#define NTXD 256 /* Max # of entries in Rx FIFO based on 4kb page size */ -#define NRXD 256 +#define NRXD 256 /* try to keep this # rbufs posted to the chip */ -#define NRXBUFPOST 32 +#define NRXBUFPOST 32 /* data msg txq hiwat mark */ -#define BRCMS_DATAHIWAT 50 +#define BRCMS_DATAHIWAT 50 -/* bounded rx loops */ -#define RXBND 8 /* max # frames to process in brcms_c_recv() */ -#define TXSBND 8 /* max # tx status to process in wlc_txstatus() 
*/ - -/* - * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL. - */ -#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1) +/* max # frames to process in brcms_c_recv() */ +#define RXBND 8 +/* max # tx status to process in wlc_txstatus() */ +#define TXSBND 8 /* brcmu_format_flags() bit description structure */ struct brcms_c_bit_desc { @@ -375,10 +300,22 @@ uint brcm_msg_level = #endif /* BCMDBG */ /* TX FIFO number to WME/802.1E Access Category */ -static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE }; +static const u8 wme_fifo2ac[] = { + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_BE, + IEEE80211_AC_BE +}; -/* WME/802.1E Access Category to TX FIFO number */ -static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 }; +/* ieee80211 Access Category to TX FIFO number */ +static const u8 wme_ac2fifo[] = { + TX_AC_VO_FIFO, + TX_AC_VI_FIFO, + TX_AC_BE_FIFO, + TX_AC_BK_FIFO +}; /* 802.1D Priority to precedence queue mapping */ const u8 wlc_prio2prec_map[] = { @@ -405,13 +342,6 @@ static const u16 xmtfifo_sz[][NFIFO] = { {9, 58, 22, 14, 14, 5}, }; -static const u8 acbitmap2maxprio[] = { - PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK, - PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, - PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, - PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO -}; - #ifdef BCMDBG static const char * const fifo_names[] = { "AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" }; @@ -424,6 +354,22 @@ static const char fifo_names[6][0]; static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL); #endif +/* Find basic rate for a given rate */ +static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec) +{ + if (is_mcs_rate(rspec)) + return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK] + .leg_ofdm]; + return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK]; +} + +static u16 frametype(u32 rspec, u8 mimoframe) +{ + if (is_mcs_rate(rspec)) + return mimoframe; + return is_cck_rate(rspec) ? 
FT_CCK : FT_OFDM; +} + /* currently the best mechanism for determining SIFS is the band in use */ static u16 get_sifs(struct brcms_band *band) { @@ -442,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band) */ static bool brcms_deviceremoved(struct brcms_c_info *wlc) { + u32 macctrl; + if (!wlc->hw->clk) return ai_deviceremoved(wlc->hw->sih); - return (R_REG(&wlc->hw->regs->maccontrol) & - (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN; + macctrl = bcma_read32(wlc->hw->d11core, + D11REGOFFS(maccontrol)); + return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN; } /* sum the individual fifo tx pending packet counts */ @@ -470,20 +419,6 @@ static int brcms_chspec_bw(u16 chanspec) return BRCMS_10_MHZ; } -/* - * return true if Minimum Power Consumption should - * be entered, false otherwise - */ -static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc) -{ - return false; -} - -static bool brcms_c_ismpc(struct brcms_c_info *wlc) -{ - return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc)); -} - static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg) { if (cfg == NULL) @@ -650,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw, bool shortslot) { - struct d11regs __iomem *regs; - - regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; if (shortslot) { /* 11g short slot: 11a timing */ - W_REG(®s->ifs_slot, 0x0207); /* APHY_SLOT_TIME */ + bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207); brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME); } else { /* 11g long slot: 11b timing */ - W_REG(®s->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */ + bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212); brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME); } } @@ -669,9 +602,8 @@ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw, * calculate frame duration of a given rate and length, return * time in usec unit */ -uint -brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, - u8 preamble_type, uint mac_len) +static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, + u8 preamble_type, uint mac_len) { uint nsyms, dur = 0, Ndps, kNdps; uint rate = rspec2rate(ratespec); @@ -741,24 +673,22 @@ brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, static void brcms_c_write_inits(struct brcms_hardware *wlc_hw, const struct d11init *inits) { + struct bcma_device *core = wlc_hw->d11core; int i; - u8 __iomem *base; - u8 __iomem *addr; + uint offset; u16 size; u32 value; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - base = (u8 __iomem *)wlc_hw->regs; - for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) { size = le16_to_cpu(inits[i].size); - addr = base + le16_to_cpu(inits[i].addr); + offset = le16_to_cpu(inits[i].addr); value = le32_to_cpu(inits[i].value); if (size == 2) - W_REG((u16 __iomem *)addr, value); + bcma_write16(core, offset, value); else if (size == 4) - W_REG((u32 __iomem *)addr, value); + bcma_write32(core, offset, value); else break; } @@ -808,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw) } } +static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v) +{ + struct bcma_device *core = wlc_hw->d11core; + u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m; + + bcma_awrite32(core, BCMA_IOCTL, ioctl | v); +} + static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) { BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk); @@ -816,17 +754,17 @@ 
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) if (OFF == clk) { /* clear gmode bit, put phy into reset */ - ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE), - (SICF_PRST | SICF_FGC)); + brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE), + (SICF_PRST | SICF_FGC)); udelay(1); - ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST); + brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST); udelay(1); } else { /* take phy out of reset */ - ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC); + brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC); udelay(1); - ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0); + brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0); udelay(1); } @@ -847,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit) wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit]; /* set gmode core flag */ - if (wlc_hw->sbclk && !wlc_hw->noreset) - ai_core_cflags(wlc_hw->sih, SICF_GMODE, - ((bandunit == 0) ? SICF_GMODE : 0)); + if (wlc_hw->sbclk && !wlc_hw->noreset) { + u32 gmode = 0; + + if (bandunit == 0) + gmode = SICF_GMODE; + + brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode); + } } /* switch to new band but leave it inactive */ @@ -857,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit) { struct brcms_hardware *wlc_hw = wlc->hw; u32 macintmask; + u32 macctrl; BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); - - WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0); + macctrl = bcma_read32(wlc_hw->d11core, + D11REGOFFS(maccontrol)); + WARN_ON((macctrl & MCTL_EN_MAC) != 0); /* disable interrupts */ macintmask = brcms_intrsoff(wlc->wl); @@ -969,7 +914,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) lfbl, /* Long Frame Rate Fallback Limit */ fbl; - if (queue < AC_COUNT) { + if (queue < IEEE80211_NUM_ACS) { sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]], EDCF_SFB); lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]], @@ -1018,14 +963,12 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) tx_info->flags |= IEEE80211_TX_STAT_ACK; } - totlen = brcmu_pkttotlen(p); + totlen = p->len; free_pdu = true; brcms_c_txfifo_complete(wlc, queue, 1); if (lastframe) { - p->next = NULL; - p->prev = NULL; /* remove PLCP & Broadcom tx descriptor header */ skb_pull(p, D11_PHY_HDR_LEN); skb_pull(p, D11_TXH_LEN); @@ -1053,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) { bool morepending = false; struct brcms_c_info *wlc = wlc_hw->wlc; - struct d11regs __iomem *regs; + struct bcma_device *core; struct tx_status txstatus, *txs; u32 s1, s2; uint n = 0; @@ -1066,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); txs = &txstatus; - regs = wlc_hw->regs; + core = wlc_hw->d11core; *fatal = false; + s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); while (!(*fatal) - && (s1 = R_REG(®s->frmtxstatus)) & TXS_V) { + && (s1 & TXS_V)) { if (s1 == 0xffffffff) { wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); return morepending; } - - s2 = R_REG(®s->frmtxstatus2); + s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); txs->status = s1 & TXS_STATUS_MASK; txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; @@ -1090,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) /* !give others some time to run! 
*/ if (++n >= max_tx_num) break; + s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); } if (*fatal) @@ -1134,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init) } } -static struct dma64regs __iomem * -dmareg(struct brcms_hardware *hw, uint direction, uint fifonum) +static uint +dmareg(uint direction, uint fifonum) { if (direction == DMA_TX) - return &(hw->regs->fifo64regs[fifonum].dmaxmt); - return &(hw->regs->fifo64regs[fifonum].dmarcv); + return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt); + return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv); } static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) @@ -1165,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * TX: TX_AC_BK_FIFO (TX AC Background data packets) * RX: RX_FIFO (RX data packets) */ - wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, - (wme ? dmareg(wlc_hw, DMA_TX, 0) : - NULL), dmareg(wlc_hw, DMA_RX, 0), + wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, + (wme ? dmareg(DMA_TX, 0) : 0), + dmareg(DMA_RX, 0), (wme ? NTXD : 0), NRXD, RXBUFSZ, -1, NRXBUFPOST, BRCMS_HWRXOFF, &brcm_msg_level); @@ -1179,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * (legacy) TX_DATA_FIFO (TX data packets) * RX: UNUSED */ - wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, - dmareg(wlc_hw, DMA_TX, 1), NULL, + wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, + dmareg(DMA_TX, 1), 0, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[1]); @@ -1190,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * TX: TX_AC_VI_FIFO (TX AC Video data packets) * RX: UNUSED */ - wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, - dmareg(wlc_hw, DMA_TX, 2), NULL, + wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, + dmareg(DMA_TX, 2), 0, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[2]); @@ -1200,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * TX: TX_AC_VO_FIFO (TX AC Voice data packets) * (legacy) TX_CTL_FIFO (TX control & mgmt packets) */ - wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, - dmareg(wlc_hw, DMA_TX, 3), - NULL, NTXD, 0, 0, -1, + wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, + dmareg(DMA_TX, 3), + 0, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[3]); /* Cleaner to leave this as if with AP defined */ @@ -1276,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw) /* control chip clock to save power, enable dynamic clock or force fast clock */ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) { - if (wlc_hw->sih->cccaps & CC_CAP_PMU) { + if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) { /* new chips with PMU, CCS_FORCEHT will distribute the HT clock * on backplane, but mac core will still run on ALP(not HT) when * it enters powersave mode, which means the FCA bit may not be @@ -1285,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) if (wlc_hw->clk) { if (mode == CLK_FAST) { - OR_REG(&wlc_hw->regs->clk_ctl_st, - CCS_FORCEHT); + bcma_set32(wlc_hw->d11core, + D11REGOFFS(clk_ctl_st), + CCS_FORCEHT); udelay(64); - SPINWAIT(((R_REG - (&wlc_hw->regs-> - clk_ctl_st) & CCS_HTAVAIL) == 0), - PMU_MAX_TRANSITION_DLY); - WARN_ON(!(R_REG - (&wlc_hw->regs-> - clk_ctl_st) & CCS_HTAVAIL)); + SPINWAIT( + 
((bcma_read32(wlc_hw->d11core, + D11REGOFFS(clk_ctl_st)) & + CCS_HTAVAIL) == 0), + PMU_MAX_TRANSITION_DLY); + WARN_ON(!(bcma_read32(wlc_hw->d11core, + D11REGOFFS(clk_ctl_st)) & + CCS_HTAVAIL)); } else { - if ((wlc_hw->sih->pmurev == 0) && - (R_REG - (&wlc_hw->regs-> - clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ))) - SPINWAIT(((R_REG - (&wlc_hw->regs-> - clk_ctl_st) & CCS_HTAVAIL) - == 0), - PMU_MAX_TRANSITION_DLY); - AND_REG(&wlc_hw->regs->clk_ctl_st, + if ((ai_get_pmurev(wlc_hw->sih) == 0) && + (bcma_read32(wlc_hw->d11core, + D11REGOFFS(clk_ctl_st)) & + (CCS_FORCEHT | CCS_HTAREQ))) + SPINWAIT( + ((bcma_read32(wlc_hw->d11core, + offsetof(struct d11regs, + clk_ctl_st)) & + CCS_HTAVAIL) == 0), + PMU_MAX_TRANSITION_DLY); + bcma_mask32(wlc_hw->d11core, + D11REGOFFS(clk_ctl_st), ~CCS_FORCEHT); } } @@ -1322,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) /* check fast clock is available (if core is not in reset) */ if (wlc_hw->forcefastclk && wlc_hw->clk) - WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) & + WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) & SISF_FCLKA)); /* @@ -1439,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw) maccontrol |= MCTL_INFRA; } - W_REG(&wlc_hw->regs->maccontrol, maccontrol); + bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol), + maccontrol); } /* set or clear maccontrol bits */ @@ -1533,7 +1482,7 @@ static void brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset, const u8 *addr) { - struct d11regs __iomem *regs; + struct bcma_device *core = wlc_hw->d11core; u16 mac_l; u16 mac_m; u16 mac_h; @@ -1541,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset, BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n", wlc_hw->unit); - regs = wlc_hw->regs; mac_l = addr[0] | (addr[1] << 8); mac_m = addr[2] | (addr[3] << 8); mac_h = addr[4] | (addr[5] << 8); /* enter the MAC addr into the RXE match registers */ - W_REG(®s->rcm_ctl, RCM_INC_DATA | match_reg_offset); - W_REG(®s->rcm_mat_data, mac_l); - W_REG(®s->rcm_mat_data, mac_m); - W_REG(®s->rcm_mat_data, mac_h); - + bcma_write16(core, D11REGOFFS(rcm_ctl), + RCM_INC_DATA | match_reg_offset); + bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l); + bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m); + bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h); } void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len, void *buf) { - struct d11regs __iomem *regs; + struct bcma_device *core = wlc_hw->d11core; u32 word; __le32 word_le; __be32 word_be; bool be_bit; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - regs = wlc_hw->regs; - W_REG(®s->tplatewrptr, offset); + bcma_write32(core, D11REGOFFS(tplatewrptr), offset); /* if MCTL_BIGEND bit set in mac control register, * the chip swaps data in fifo, as well as data in * template ram */ - be_bit = (R_REG(®s->maccontrol) & MCTL_BIGEND) != 0; + be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0; while (len > 0) { memcpy(&word, buf, sizeof(u32)); @@ -1585,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len, word = *(u32 *)&word_le; } - W_REG(®s->tplatewrdata, word); + bcma_write32(core, D11REGOFFS(tplatewrdata), word); buf = (u8 *) buf + sizeof(u32); len -= sizeof(u32); @@ -1596,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin) { wlc_hw->band->CWmin = newmin; - W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN); - 
(void)R_REG(&wlc_hw->regs->objaddr); - W_REG(&wlc_hw->regs->objdata, newmin); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), + OBJADDR_SCR_SEL | S_DOT11_CWMIN); + (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin); } static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax) { wlc_hw->band->CWmax = newmax; - W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX); - (void)R_REG(&wlc_hw->regs->objaddr); - W_REG(&wlc_hw->regs->objdata, newmax); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), + OBJADDR_SCR_SEL | S_DOT11_CWMAX); + (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax); } void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw) @@ -1773,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw) { BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - ai_corereg(wlc_hw->sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_addr), ~0, 0); + ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr), + ~0, 0); udelay(1); - ai_corereg(wlc_hw->sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_data), 0x4, 0); + ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data), + 0x4, 0); udelay(1); - ai_corereg(wlc_hw->sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_data), 0x4, 4); + ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data), + 0x4, 4); udelay(1); - ai_corereg(wlc_hw->sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_data), 0x4, 0); + ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data), + 0x4, 0); udelay(1); } @@ -1797,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk) return; if (ON == clk) - ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC); + brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC); else - ai_core_cflags(wlc_hw->sih, SICF_FGC, 0); + brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0); } void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk) { if (ON == clk) - ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE); + brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE); else - ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0); + brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0); } void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) @@ -1828,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) && NREV_LE(wlc_hw->band->phyrev, 4)) { /* Set the PHY bandwidth */ - ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits); + brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits); udelay(1); @@ -1836,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) brcms_b_core_phypll_reset(wlc_hw); /* reset the PHY */ - ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE), - (SICF_PRST | SICF_PCLKE)); + brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE), + (SICF_PRST | SICF_PCLKE)); phy_in_reset = true; } else { - ai_core_cflags(wlc_hw->sih, - (SICF_PRST | SICF_PCLKE | SICF_BWMASK), - (SICF_PRST | SICF_PCLKE | phy_bw_clkbits)); + brcms_b_core_ioctl(wlc_hw, + (SICF_PRST | SICF_PCLKE | SICF_BWMASK), + (SICF_PRST | SICF_PCLKE | phy_bw_clkbits)); } udelay(2); @@ -1859,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit, u32 macintmask; /* Enable the d11 core before accessing it */ - if (!ai_iscoreup(wlc_hw->sih)) { - ai_core_reset(wlc_hw->sih, 0, 0); + if (!bcma_core_is_enabled(wlc_hw->d11core)) 
{ + bcma_core_enable(wlc_hw->d11core, 0); brcms_c_mctrl_reset(wlc_hw); } @@ -1886,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit, brcms_intrsrestore(wlc->wl, macintmask); /* ucode should still be suspended.. */ - WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0); + WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC) != 0); } static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw) @@ -1914,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw) uint b2 = boardrev & 0xf; /* voards from other vendors are always considered valid */ - if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM) + if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM) return true; /* do some boardrev sanity checks when boardvendor is Broadcom */ @@ -1986,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want) static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw) { bool v, clk, xtal; - u32 resetbits = 0, flags = 0; + u32 flags = 0; xtal = wlc_hw->sbclk; if (!xtal) @@ -2003,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw) flags |= SICF_PCLKE; /* + * TODO: test suspend/resume + * * AI chip doesn't restore bar0win2 on * hibernation/resume, need sw fixup */ - if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || - (wlc_hw->sih->chip == BCM43225_CHIP_ID)) - wlc_hw->regs = (struct d11regs __iomem *) - ai_setcore(wlc_hw->sih, D11_CORE_ID, 0); - ai_core_reset(wlc_hw->sih, flags, resetbits); + + bcma_core_enable(wlc_hw->d11core, flags); brcms_c_mctrl_reset(wlc_hw); } - v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0); + v = ((bcma_read32(wlc_hw->d11core, + D11REGOFFS(phydebug)) & PDBG_RFD) != 0); /* put core back into reset */ if (!clk) - ai_core_disable(wlc_hw->sih, 0); + bcma_core_disable(wlc_hw->d11core, 0); if (!xtal) brcms_b_xtal(wlc_hw, OFF); @@ -2042,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo) */ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) { - struct d11regs __iomem *regs; uint i; bool fastclk; - u32 resetbits = 0; if (flags == BRCMS_USE_COREFLAGS) flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0); BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - regs = wlc_hw->regs; - /* request FAST clock if not on */ fastclk = wlc_hw->forcefastclk; if (!fastclk) brcms_b_clkctl_clk(wlc_hw, CLK_FAST); /* reset the dma engines except first time thru */ - if (ai_iscoreup(wlc_hw->sih)) { + if (bcma_core_is_enabled(wlc_hw->d11core)) { for (i = 0; i < NFIFO; i++) if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: " @@ -2098,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) * they may touch chipcommon as well. 
*/ wlc_hw->clk = false; - ai_core_reset(wlc_hw->sih, flags, resetbits); + bcma_core_enable(wlc_hw->d11core, flags); wlc_hw->clk = true; if (wlc_hw->band && wlc_hw->band->pi) wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true); brcms_c_mctrl_reset(wlc_hw); - if (wlc_hw->sih->cccaps & CC_CAP_PMU) + if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) brcms_b_clkctl_clk(wlc_hw, CLK_FAST); brcms_b_phy_reset(wlc_hw); @@ -2126,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) */ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) { - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; u16 fifo_nu; u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk; u16 txfifo_def, txfifo_def1; @@ -2147,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) txfifo_cmd = TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT); - W_REG(®s->xmtfifocmd, txfifo_cmd); - W_REG(®s->xmtfifodef, txfifo_def); - W_REG(®s->xmtfifodef1, txfifo_def1); + bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd); + bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def); + bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1); - W_REG(®s->xmtfifocmd, txfifo_cmd); + bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd); txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu]; } @@ -2186,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode) { - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; - if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || - (wlc_hw->sih->chip == BCM43225_CHIP_ID)) { + if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) || + (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) { if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */ - W_REG(®s->tsf_clk_frac_l, 0x2082); - W_REG(®s->tsf_clk_frac_h, 0x8); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); } else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */ - W_REG(®s->tsf_clk_frac_l, 0x5341); - W_REG(®s->tsf_clk_frac_h, 0x8); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); } else { /* 120Mhz */ - W_REG(®s->tsf_clk_frac_l, 0x8889); - W_REG(®s->tsf_clk_frac_h, 0x8); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); } } else if (BRCMS_ISLCNPHY(wlc_hw->band)) { if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */ - W_REG(®s->tsf_clk_frac_l, 0x7CE0); - W_REG(®s->tsf_clk_frac_h, 0xC); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC); } else { /* 80Mhz */ - W_REG(®s->tsf_clk_frac_l, 0xCCCD); - W_REG(®s->tsf_clk_frac_h, 0xC); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD); + bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC); } } } @@ -2215,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode) static void brcms_c_gpio_init(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct d11regs __iomem *regs; u32 gc, gm; - regs = wlc_hw->regs; - /* use GPIO select 0 to get all gpio signals from the gpio out reg */ brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0); @@ -2250,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc) * The board itself is powered by these GPIOs * (when not sending pattern) so set them high */ - 
OR_REG(®s->psm_gpio_oe, - (BOARD_GPIO_12 | BOARD_GPIO_13)); - OR_REG(®s->psm_gpio_out, - (BOARD_GPIO_12 | BOARD_GPIO_13)); + bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe), + (BOARD_GPIO_12 | BOARD_GPIO_13)); + bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out), + (BOARD_GPIO_12 | BOARD_GPIO_13)); /* Enable antenna diversity, use 2x4 mode */ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN, @@ -2280,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc) static void brcms_ucode_write(struct brcms_hardware *wlc_hw, const __le32 ucode[], const size_t nbytes) { - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; uint i; uint count; @@ -2288,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw, count = (nbytes / sizeof(u32)); - W_REG(®s->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL)); - (void)R_REG(®s->objaddr); + bcma_write32(core, D11REGOFFS(objaddr), + OBJADDR_AUTO_INC | OBJADDR_UCM_SEL); + (void)bcma_read32(core, D11REGOFFS(objaddr)); for (i = 0; i < count; i++) - W_REG(®s->objdata, le32_to_cpu(ucode[i])); + bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i])); } @@ -2352,19 +2296,12 @@ void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type) wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type); } -static void brcms_c_fatal_error(struct brcms_c_info *wlc) -{ - wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n", - wlc->pub->unit); - brcms_init(wlc->wl); -} - static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) { bool fatal = false; uint unit; uint intstatus, idx; - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; struct wiphy *wiphy = wlc_hw->wlc->wiphy; unit = wlc_hw->unit; @@ -2372,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) for (idx = 0; idx < NFIFO; idx++) { /* read intstatus register and ignore any non-error bits */ intstatus = - R_REG(®s->intctrlregs[idx].intstatus) & I_ERRORS; + bcma_read32(core, + D11REGOFFS(intctrlregs[idx].intstatus)) & + I_ERRORS; if (!intstatus) continue; @@ -2414,11 +2353,12 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) } if (fatal) { - brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */ + brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */ break; } else - W_REG(®s->intctrlregs[idx].intstatus, - intstatus); + bcma_write32(core, + D11REGOFFS(intctrlregs[idx].intstatus), + intstatus); } } @@ -2426,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; wlc->macintmask = wlc->defmacintmask; - W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); -} - -/* - * callback for siutils.c, which has only wlc handler, no wl they both check - * up, not only because there is no need to off/restore d11 interrupt but also - * because per-port code may require sync with valid interrupt. 
- */ -static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc) -{ - if (!wlc->hw->up) - return 0; - - return brcms_intrsoff(wlc->wl); -} - -static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask) -{ - if (!wlc->hw->up) - return; - - brcms_intrsrestore(wlc->wl, macintmask); + bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask); } u32 brcms_c_intrsoff(struct brcms_c_info *wlc) @@ -2460,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc) macintmask = wlc->macintmask; /* isr can still happen */ - W_REG(&wlc_hw->regs->macintmask, 0); - (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */ + bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0); + (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask)); udelay(1); /* ensure int line is no longer driven */ wlc->macintmask = 0; @@ -2476,9 +2395,10 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask) return; wlc->macintmask = macintmask; - W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); + bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask); } +/* assumes that the d11 MAC is enabled */ static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw, uint tx_fifo) { @@ -2535,11 +2455,12 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw, } } -static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags) +/* precondition: requires the mac core to be enabled */ +static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx) { static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; - if (on) { + if (mute_tx) { /* suspend tx fifos */ brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO); brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO); @@ -2561,14 +2482,20 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags) wlc_hw->etheraddr); } - wlc_phy_mute_upd(wlc_hw->band->pi, on, flags); + wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0); - if (on) + if (mute_tx) brcms_c_ucode_mute_override_set(wlc_hw); else brcms_c_ucode_mute_override_clear(wlc_hw); } +void +brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx) +{ + brcms_b_mute(wlc->hw, mute_tx); +} + /* * Read and clear macintmask and macintstatus and intstatus registers. * This routine should be called with interrupts off @@ -2580,11 +2507,11 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags) static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) { struct brcms_hardware *wlc_hw = wlc->hw; - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; u32 macintstatus; /* macintstatus includes a DMA interrupt summary bit */ - macintstatus = R_REG(®s->macintstatus); + macintstatus = bcma_read32(core, D11REGOFFS(macintstatus)); BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit, macintstatus); @@ -2611,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) * consequences */ /* turn off the interrupts */ - W_REG(®s->macintmask, 0); - (void)R_REG(®s->macintmask); /* sync readback */ + bcma_write32(core, D11REGOFFS(macintmask), 0); + (void)bcma_read32(core, D11REGOFFS(macintmask)); wlc->macintmask = 0; /* clear device interrupts */ - W_REG(®s->macintstatus, macintstatus); + bcma_write32(core, D11REGOFFS(macintstatus), macintstatus); /* MI_DMAINT is indication of non-zero intstatus */ if (macintstatus & MI_DMAINT) @@ -2625,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) * RX_FIFO. 
If MI_DMAINT is set, assume it * is set and clear the interrupt. */ - W_REG(®s->intctrlregs[RX_FIFO].intstatus, - DEF_RXINTMASK); + bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus), + DEF_RXINTMASK); return macintstatus; } @@ -2689,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc) void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; u32 mc, mi; struct wiphy *wiphy = wlc->wiphy; @@ -2706,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) /* force the core awake */ brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND); - mc = R_REG(®s->maccontrol); + mc = bcma_read32(core, D11REGOFFS(maccontrol)); if (mc == 0xffffffff) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, @@ -2718,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) WARN_ON(!(mc & MCTL_PSM_RUN)); WARN_ON(!(mc & MCTL_EN_MAC)); - mi = R_REG(®s->macintstatus); + mi = bcma_read32(core, D11REGOFFS(macintstatus)); if (mi == 0xffffffff) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); @@ -2729,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0); - SPINWAIT(!(R_REG(®s->macintstatus) & MI_MACSSPNDD), + SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD), BRCMS_MAX_MAC_SUSPEND); - if (!(R_REG(®s->macintstatus) & MI_MACSSPNDD)) { + if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) { wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS" " and MI_MACSSPNDD is still not on.\n", wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND); wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, " "psm_brc 0x%04x\n", wlc_hw->unit, - R_REG(®s->psmdebug), - R_REG(®s->phydebug), - R_REG(®s->psm_brc)); + bcma_read32(core, D11REGOFFS(psmdebug)), + bcma_read32(core, D11REGOFFS(phydebug)), + bcma_read16(core, D11REGOFFS(psm_brc))); } - mc = R_REG(®s->maccontrol); + mc = bcma_read32(core, D11REGOFFS(maccontrol)); if (mc == 0xffffffff) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); @@ -2758,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) void brcms_c_enable_mac(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; u32 mc, mi; BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, @@ -2771,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc) if (wlc_hw->mac_suspend_depth > 0) return; - mc = R_REG(®s->maccontrol); + mc = bcma_read32(core, D11REGOFFS(maccontrol)); WARN_ON(mc & MCTL_PSM_JMP_0); WARN_ON(mc & MCTL_EN_MAC); WARN_ON(!(mc & MCTL_PSM_RUN)); brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC); - W_REG(®s->macintstatus, MI_MACSSPNDD); + bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD); - mc = R_REG(®s->maccontrol); + mc = bcma_read32(core, D11REGOFFS(maccontrol)); WARN_ON(mc & MCTL_PSM_JMP_0); WARN_ON(!(mc & MCTL_EN_MAC)); WARN_ON(!(mc & MCTL_PSM_RUN)); - mi = R_REG(®s->macintstatus); + mi = bcma_read32(core, D11REGOFFS(macintstatus)); WARN_ON(mi & MI_MACSSPNDD); brcms_c_ucode_wake_override_clear(wlc_hw, @@ -2801,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode) static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw) { - struct d11regs __iomem *regs; + struct bcma_device *core = 
wlc_hw->d11core; u32 w, val; struct wiphy *wiphy = wlc_hw->wlc->wiphy; BCMMSG(wiphy, "wl%d\n", wlc_hw->unit); - regs = wlc_hw->regs; - /* Validate dchip register access */ - W_REG(®s->objaddr, OBJADDR_SHM_SEL | 0); - (void)R_REG(®s->objaddr); - w = R_REG(®s->objdata); + bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + w = bcma_read32(core, D11REGOFFS(objdata)); /* Can we write and read back a 32bit register? */ - W_REG(®s->objaddr, OBJADDR_SHM_SEL | 0); - (void)R_REG(®s->objaddr); - W_REG(®s->objdata, (u32) 0xaa5555aa); + bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa); - W_REG(®s->objaddr, OBJADDR_SHM_SEL | 0); - (void)R_REG(®s->objaddr); - val = R_REG(®s->objdata); + bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + val = bcma_read32(core, D11REGOFFS(objdata)); if (val != (u32) 0xaa5555aa) { wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " "expected 0xaa5555aa\n", wlc_hw->unit, val); return false; } - W_REG(®s->objaddr, OBJADDR_SHM_SEL | 0); - (void)R_REG(®s->objaddr); - W_REG(®s->objdata, (u32) 0x55aaaa55); + bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55); - W_REG(®s->objaddr, OBJADDR_SHM_SEL | 0); - (void)R_REG(®s->objaddr); - val = R_REG(®s->objdata); + bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + val = bcma_read32(core, D11REGOFFS(objdata)); if (val != (u32) 0x55aaaa55) { wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " "expected 0x55aaaa55\n", wlc_hw->unit, val); return false; } - W_REG(®s->objaddr, OBJADDR_SHM_SEL | 0); - (void)R_REG(®s->objaddr); - W_REG(®s->objdata, w); + bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + bcma_write32(core, D11REGOFFS(objdata), w); /* clear CFPStart */ - W_REG(®s->tsf_cfpstart, 0); + bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0); - w = R_REG(®s->maccontrol); + w = bcma_read32(core, D11REGOFFS(maccontrol)); if ((w != (MCTL_IHR_EN | MCTL_WAKE)) && (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) { wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = " @@ -2866,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw) void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on) { - struct d11regs __iomem *regs; + struct bcma_device *core = wlc_hw->d11core; u32 tmp; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); tmp = 0; - regs = wlc_hw->regs; if (on) { - if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) { - OR_REG(®s->clk_ctl_st, - (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL | - CCS_ERSRC_REQ_PHYPLL)); - SPINWAIT((R_REG(®s->clk_ctl_st) & - (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT), + if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { + bcma_set32(core, D11REGOFFS(clk_ctl_st), + CCS_ERSRC_REQ_HT | + CCS_ERSRC_REQ_D11PLL | + CCS_ERSRC_REQ_PHYPLL); + SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & + CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT, PHYPLL_WAIT_US); - tmp = R_REG(®s->clk_ctl_st); - if ((tmp & (CCS_ERSRC_AVAIL_HT)) != - (CCS_ERSRC_AVAIL_HT)) + tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); + if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT) wiphy_err(wlc_hw->wlc->wiphy, "%s: 
turn on PHY" " PLL failed\n", __func__); } else { - OR_REG(®s->clk_ctl_st, - (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL)); - SPINWAIT((R_REG(®s->clk_ctl_st) & + bcma_set32(core, D11REGOFFS(clk_ctl_st), + tmp | CCS_ERSRC_REQ_D11PLL | + CCS_ERSRC_REQ_PHYPLL); + SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) != (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US); - tmp = R_REG(®s->clk_ctl_st); + tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); if ((tmp & (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) != @@ -2911,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on) * be requesting it; so we'll deassert the request but * not wait for status to comply. */ - AND_REG(®s->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL); - tmp = R_REG(®s->clk_ctl_st); + bcma_mask32(core, D11REGOFFS(clk_ctl_st), + ~CCS_ERSRC_REQ_PHYPLL); + (void)bcma_read32(core, D11REGOFFS(clk_ctl_st)); } } @@ -2940,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw) brcms_b_core_phypll_ctl(wlc_hw, false); wlc_hw->clk = false; - ai_core_disable(wlc_hw->sih, 0); + bcma_core_disable(wlc_hw->d11core, 0); wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false); } @@ -2964,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc) static u16 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel) { - struct d11regs __iomem *regs = wlc_hw->regs; - u16 __iomem *objdata_lo = (u16 __iomem *)®s->objdata; - u16 __iomem *objdata_hi = objdata_lo + 1; - u16 v; + struct bcma_device *core = wlc_hw->d11core; + u16 objoff = D11REGOFFS(objdata); - W_REG(®s->objaddr, sel | (offset >> 2)); - (void)R_REG(®s->objaddr); + bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2)); + (void)bcma_read32(core, D11REGOFFS(objaddr)); if (offset & 2) - v = R_REG(objdata_hi); - else - v = R_REG(objdata_lo); + objoff += 2; - return v; + return bcma_read16(core, objoff); +; } static void brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v, u32 sel) { - struct d11regs __iomem *regs = wlc_hw->regs; - u16 __iomem *objdata_lo = (u16 __iomem *)®s->objdata; - u16 __iomem *objdata_hi = objdata_lo + 1; + struct bcma_device *core = wlc_hw->d11core; + u16 objoff = D11REGOFFS(objdata); - W_REG(®s->objaddr, sel | (offset >> 2)); - (void)R_REG(®s->objaddr); + bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2)); + (void)bcma_read32(core, D11REGOFFS(objaddr)); if (offset & 2) - W_REG(objdata_hi, v); - else - W_REG(objdata_lo, v); + objoff += 2; + + bcma_write16(core, objoff, v); } /* @@ -3078,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw, /* write retry limit to SCR, shouldn't need to suspend */ if (wlc_hw->up) { - W_REG(&wlc_hw->regs->objaddr, - OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); - (void)R_REG(&wlc_hw->regs->objaddr); - W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL); - W_REG(&wlc_hw->regs->objaddr, - OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); - (void)R_REG(&wlc_hw->regs->objaddr); - W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), + OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); + (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), + OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); + (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); + bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL); } } @@ -3132,7 +3054,7 @@ static 
bool brcms_c_ps_allowed(struct brcms_c_info *wlc) return false; /* disallow PS when one of these meets when not scanning */ - if (wlc->monitor) + if (wlc->filter_flags & FIF_PROMISC_IN_BSS) return false; if (cfg->associated) { @@ -3267,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb) static void brcms_b_coreinit(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct d11regs __iomem *regs; + struct bcma_device *core = wlc_hw->d11core; u32 sflags; - uint bcnint_us; + u32 bcnint_us; uint i = 0; bool fifosz_fixup = false; int err = 0; @@ -3277,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) struct wiphy *wiphy = wlc->wiphy; struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode; - regs = wlc_hw->regs; - BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); /* reset PSM */ @@ -3291,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) fifosz_fixup = true; /* let the PSM run to the suspended state, set mode to BSS STA */ - W_REG(®s->macintstatus, -1); + bcma_write32(core, D11REGOFFS(macintstatus), -1); brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE)); /* wait for ucode to self-suspend after auto-init */ - SPINWAIT(((R_REG(®s->macintstatus) & MI_MACSSPNDD) == 0), - 1000 * 1000); - if ((R_REG(®s->macintstatus) & MI_MACSSPNDD) == 0) + SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) & + MI_MACSSPNDD) == 0), 1000 * 1000); + if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0) wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-" "suspend!\n", wlc_hw->unit); brcms_c_gpio_init(wlc); - sflags = ai_core_sflags(wlc_hw->sih, 0, 0); + sflags = bcma_aread32(core, BCMA_IOST); if (D11REV_IS(wlc_hw->corerev, 23)) { if (BRCMS_ISNPHY(wlc_hw->band)) @@ -3368,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) wlc_hw->xmtfifo_sz[i], i); /* make sure we can still talk to the mac */ - WARN_ON(R_REG(®s->maccontrol) == 0xffffffff); + WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff); /* band-specific inits done by wlc_bsinit() */ @@ -3377,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT); /* enable one rx interrupt per received frame */ - W_REG(®s->intrcvlazy[0], (1 << IRL_FC_SHIFT)); + bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT)); /* set the station mode (BSS STA) */ brcms_b_mctrl(wlc_hw, @@ -3386,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) /* set up Beacon interval */ bcnint_us = 0x8000 << 10; - W_REG(®s->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT)); - W_REG(®s->tsf_cfpstart, bcnint_us); - W_REG(®s->macintstatus, MI_GP1); + bcma_write32(core, D11REGOFFS(tsf_cfprep), + (bcnint_us << CFPREP_CBI_SHIFT)); + bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us); + bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1); /* write interrupt mask */ - W_REG(®s->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK); + bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask), + DEF_RXINTMASK); /* allow the MAC to control the PHY clock (dynamic on/off) */ brcms_b_macphyclk_set(wlc_hw, ON); /* program dynamic clock control fast powerup delay register */ wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih); - W_REG(®s->scc_fastpwrup_dly, wlc->fastpwrup_dly); + bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly); /* tell the ucode the corerev */ brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev); @@ -3411,19 +3333,21 @@ static void 
brcms_b_coreinit(struct brcms_c_info *wlc) machwcap >> 16) & 0xffff)); /* write retry limits to SCR, this done after PSM init */ - W_REG(®s->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); - (void)R_REG(®s->objaddr); - W_REG(®s->objdata, wlc_hw->SRL); - W_REG(®s->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); - (void)R_REG(®s->objaddr); - W_REG(®s->objdata, wlc_hw->LRL); + bcma_write32(core, D11REGOFFS(objaddr), + OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL); + bcma_write32(core, D11REGOFFS(objaddr), + OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); + (void)bcma_read32(core, D11REGOFFS(objaddr)); + bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL); /* write rate fallback retry limits */ brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL); brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL); - AND_REG(®s->ifs_ctl, 0x0FFF); - W_REG(®s->ifs_aifsn, EDCF_AIFSN_MIN); + bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF); + bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN); /* init the tx dma engines */ for (i = 0; i < NFIFO; i++) { @@ -3437,8 +3361,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) } void -static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec, - bool mute) { +static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) { u32 macintmask; bool fastclk; struct brcms_c_info *wlc = wlc_hw->wlc; @@ -3463,10 +3386,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec, /* core-specific initialization */ brcms_b_coreinit(wlc); - /* suspend the tx fifos and mute the phy for preism cac time */ - if (mute) - brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM); - /* band-specific inits */ brcms_b_bsinit(wlc, chanspec); @@ -3656,42 +3575,32 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc, brcms_c_set_phy_chanspec(wlc, chanspec); } -static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc) -{ - if (wlc->bcnmisc_monitor) - brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC); - else - brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0); -} - -void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc) -{ - wlc->bcnmisc_monitor = promisc; - brcms_c_mac_bcn_promisc(wlc); -} - -/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */ -static void brcms_c_mac_promisc(struct brcms_c_info *wlc) +/* + * Set or clear filtering related maccontrol bits based on + * specified filter flags + */ +void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags) { u32 promisc_bits = 0; - /* - * promiscuous mode just sets MCTL_PROMISC - * Note: APs get all BSS traffic without the need to set - * the MCTL_PROMISC bit since all BSS data traffic is - * directed at the AP - */ - if (wlc->pub->promisc) + wlc->filter_flags = filter_flags; + + if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) promisc_bits |= MCTL_PROMISC; - /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL - * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is - * handled in brcms_c_mac_bcn_promisc() - */ - if (wlc->monitor) - promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL; + if (filter_flags & FIF_BCN_PRBRESP_PROMISC) + promisc_bits |= MCTL_BCNS_PROMISC; + + if (filter_flags & FIF_FCSFAIL) + promisc_bits |= MCTL_KEEPBADFCS; - brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits); + if (filter_flags & (FIF_CONTROL | FIF_PSPOLL)) + promisc_bits |= MCTL_KEEPCONTROL; + + brcms_b_mctrl(wlc->hw, + MCTL_PROMISC | 
MCTL_BCNS_PROMISC | + MCTL_KEEPCONTROL | MCTL_KEEPBADFCS, + promisc_bits); } /* @@ -3721,10 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc) } else { /* disable an active IBSS if we are not on the home channel */ } - - /* update the various promisc bits */ - brcms_c_mac_bcn_promisc(wlc); - brcms_c_mac_promisc(wlc); } static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate, @@ -3899,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc) BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps); - v1 = R_REG(&wlc->regs->maccontrol); + v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol)); v2 = MCTL_WAKE; if (hps) v2 |= MCTL_HPS; @@ -3979,7 +3884,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec) void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec, - bool mute, struct txpwr_limits *txpwr) + bool mute_tx, struct txpwr_limits *txpwr) { uint bandunit; @@ -4005,7 +3910,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec, } } - wlc_phy_initcal_enable(wlc_hw->band->pi, !mute); + wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx); if (!wlc_hw->up) { if (wlc_hw->clk) @@ -4017,7 +3922,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec, wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec); /* Update muting of the channel */ - brcms_b_mute(wlc_hw, mute, 0); + brcms_b_mute(wlc_hw, mute_tx); } } @@ -4205,7 +4110,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, EDCF_TXOP2USEC(acp_shm.txop); acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK); - if (aci == AC_VI && acp_shm.txop == 0 + if (aci == IEEE80211_AC_VI && acp_shm.txop == 0 && acp_shm.aifs < EDCF_AIFSN_MAX) acp_shm.aifs++; @@ -4218,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, acp_shm.cwmax = params->cw_max; acp_shm.cwcur = acp_shm.cwmin; acp_shm.bslots = - R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur; + bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) & + acp_shm.cwcur; acp_shm.reggap = acp_shm.bslots + acp_shm.aifs; /* Indicate the new params to the ucode */ acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO + @@ -4242,7 +4148,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, } } -void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) +static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) { u16 aci; int i_ac; @@ -4255,7 +4161,7 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) }; /* ucode needs these parameters during its initialization */ const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0]; - for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) { + for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) { /* find out which ac this set of params applies to */ aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT; @@ -4277,17 +4183,6 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) } } -/* maintain LED behavior in down state */ -static void brcms_c_down_led_upd(struct brcms_c_info *wlc) -{ - /* - * maintain LEDs while in down state, turn on sbclk if - * not available yet. 
Turn on sbclk if necessary - */ - brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP); - brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP); -} - static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc) { /* Don't start the timer if HWRADIO feature is disabled */ @@ -4299,28 +4194,6 @@ static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc) brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true); } -static void brcms_c_radio_disable(struct brcms_c_info *wlc) -{ - if (!wlc->pub->up) { - brcms_c_down_led_upd(wlc); - return; - } - - brcms_c_radio_monitor_start(wlc); - brcms_down(wlc->wl); -} - -static void brcms_c_radio_enable(struct brcms_c_info *wlc) -{ - if (wlc->pub->up) - return; - - if (brcms_deviceremoved(wlc)) - return; - - brcms_up(wlc->wl); -} - static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc) { if (!wlc->radio_monitor) @@ -4343,18 +4216,6 @@ static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc) mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE); } -/* - * centralized radio disable/enable function, - * invoke radio enable/disable after updating hwradio status - */ -static void brcms_c_radio_upd(struct brcms_c_info *wlc) -{ - if (wlc->pub->radio_disabled) - brcms_c_radio_disable(wlc); - else - brcms_c_radio_enable(wlc); -} - /* update hwradio status and return it */ bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc) { @@ -4376,12 +4237,7 @@ static void brcms_c_radio_timer(void *arg) return; } - /* cap mpc off count */ - if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT) - wlc->mpc_offcnt++; - brcms_c_radio_hwdisable_upd(wlc); - brcms_c_radio_upd(wlc); } /* common low-level watchdog code */ @@ -4407,60 +4263,6 @@ static void brcms_b_watchdog(void *arg) wlc_phy_watchdog(wlc_hw->band->pi); } -static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc) -{ - bool mpc_radio, radio_state; - - /* - * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled - * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio - * monitor also when WL_RADIO_MPC_DISABLE is the only reason that - * the radio is going down. - */ - if (!wlc->mpc) { - if (!wlc->pub->radio_disabled) - return; - mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE); - brcms_c_radio_upd(wlc); - if (!wlc->pub->radio_disabled) - brcms_c_radio_monitor_stop(wlc); - return; - } - - /* - * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in - * wlc->pub->radio_disabled to go ON, always call radio_upd - * synchronously to go OFF, postpone radio_upd to later when - * context is safe(e.g. watchdog) - */ - radio_state = - (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF : - ON); - mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON; - - if (radio_state == ON && mpc_radio == OFF) - wlc->mpc_delay_off = wlc->mpc_dlycnt; - else if (radio_state == OFF && mpc_radio == ON) { - mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE); - brcms_c_radio_upd(wlc); - if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD) - wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT; - else - wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT; - } - /* - * Below logic is meant to capture the transition from mpc off - * to mpc on for reasons other than wlc->mpc_delay_off keeping - * the mpc off. 
In that case reset wlc->mpc_delay_off to - * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off - */ - if ((wlc->prev_non_delay_mpc == false) && - (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off) - wlc->mpc_delay_off = wlc->mpc_dlycnt; - - wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc); -} - /* common watchdog code */ static void brcms_c_watchdog(void *arg) { @@ -4481,21 +4283,7 @@ static void brcms_c_watchdog(void *arg) /* increment second count */ wlc->pub->now++; - /* delay radio disable */ - if (wlc->mpc_delay_off) { - if (--wlc->mpc_delay_off == 0) { - mboolset(wlc->pub->radio_disabled, - WL_RADIO_MPC_DISABLE); - if (wlc->mpc && brcms_c_ismpc(wlc)) - wlc->mpc_offcnt = 0; - } - } - - /* mpc sync */ - brcms_c_radio_mpc_upd(wlc); - /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */ brcms_c_radio_hwdisable_upd(wlc); - brcms_c_radio_upd(wlc); /* if radio is disable, driver may be down, quit here */ if (wlc->pub->radio_disabled) return; @@ -4599,9 +4387,6 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit) /* WME QoS mode is Auto by default */ wlc->pub->_ampdu = AMPDU_AGG_HOST; wlc->pub->bcmerror = 0; - - /* initialize mpc delay */ - wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT; } static uint brcms_c_attach_module(struct brcms_c_info *wlc) @@ -4647,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc) * initialize software state for each core and band * put the whole chip in reset(driver down state), no clock */ -static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, - uint unit, bool piomode, void __iomem *regsva, - struct pci_dev *btparam) +static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, + uint unit, bool piomode) { struct brcms_hardware *wlc_hw; - struct d11regs __iomem *regs; char *macaddr = NULL; uint err = 0; uint j; bool wme = false; struct shared_phy_params sha_params; struct wiphy *wiphy = wlc->wiphy; + struct pci_dev *pcidev = core->bus->host_pci; - BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor, - device); + BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, + pcidev->vendor, + pcidev->device); wme = true; @@ -4678,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, * Do the hardware portion of the attach. Also initialize software * state that depends on the particular hardware we are running. 
*/ - wlc_hw->sih = ai_attach(regsva, btparam); + wlc_hw->sih = ai_attach(core->bus); if (wlc_hw->sih == NULL) { wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n", unit); @@ -4687,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, } /* verify again the device is supported */ - if (!brcms_c_chipmatch(vendor, device)) { + if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) { wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported " "vendor/device (0x%x/0x%x)\n", - unit, vendor, device); + unit, pcidev->vendor, pcidev->device); err = 12; goto fail; } - wlc_hw->vendorid = vendor; - wlc_hw->deviceid = device; - - /* set bar0 window to point at D11 core */ - wlc_hw->regs = (struct d11regs __iomem *) - ai_setcore(wlc_hw->sih, D11_CORE_ID, 0); - wlc_hw->corerev = ai_corerev(wlc_hw->sih); + wlc_hw->vendorid = pcidev->vendor; + wlc_hw->deviceid = pcidev->device; - regs = wlc_hw->regs; - - wlc->regs = wlc_hw->regs; + wlc_hw->d11core = core; + wlc_hw->corerev = core->id.rev; /* validate chip, chiprev and corerev */ if (!brcms_c_isgoodchip(wlc_hw)) { @@ -4740,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, wlc_hw->boardrev = (u16) j; if (!brcms_c_validboardtype(wlc_hw)) { wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom " - "board type (0x%x)" " or revision level (0x%x)\n", - unit, wlc_hw->sih->boardtype, wlc_hw->boardrev); + "board type (0x%x)" " or revision level (0x%x)\n", + unit, ai_get_boardtype(wlc_hw->sih), + wlc_hw->boardrev); err = 15; goto fail; } @@ -4762,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, else wlc_hw->_nbands = 1; - if ((wlc_hw->sih->chip == BCM43225_CHIP_ID)) + if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) wlc_hw->_nbands = 1; /* BMAC_NOTE: remove init of pub values when brcms_c_attach() @@ -4794,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, sha_params.corerev = wlc_hw->corerev; sha_params.vid = wlc_hw->vendorid; sha_params.did = wlc_hw->deviceid; - sha_params.chip = wlc_hw->sih->chip; - sha_params.chiprev = wlc_hw->sih->chiprev; - sha_params.chippkg = wlc_hw->sih->chippkg; + sha_params.chip = ai_get_chip_id(wlc_hw->sih); + sha_params.chiprev = ai_get_chiprev(wlc_hw->sih); + sha_params.chippkg = ai_get_chippkg(wlc_hw->sih); sha_params.sromrev = wlc_hw->sromrev; - sha_params.boardtype = wlc_hw->sih->boardtype; + sha_params.boardtype = ai_get_boardtype(wlc_hw->sih); sha_params.boardrev = wlc_hw->boardrev; - sha_params.boardvendor = wlc_hw->sih->boardvendor; sha_params.boardflags = wlc_hw->boardflags; sha_params.boardflags2 = wlc_hw->boardflags2; - sha_params.buscorerev = wlc_hw->sih->buscorerev; /* alloc and save pointer to shared phy state area */ wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params); @@ -4825,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G; wlc->band->bandunit = j; wlc->band->bandtype = j ? 
BRCM_BAND_5G : BRCM_BAND_2G; - wlc->core->coreidx = ai_coreidx(wlc_hw->sih); + wlc->core->coreidx = core->core_index; - wlc_hw->machwcap = R_REG(®s->machwcap); + wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap)); wlc_hw->machwcap_backup = wlc_hw->machwcap; /* init tx fifo size */ @@ -4836,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, /* Get a phy for this band */ wlc_hw->band->pi = - wlc_phy_attach(wlc_hw->phy_sh, regs, + wlc_phy_attach(wlc_hw->phy_sh, core, wlc_hw->band->bandtype, wlc->wiphy); if (wlc_hw->band->pi == NULL) { @@ -4910,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, /* Match driver "down" state */ ai_pci_down(wlc_hw->sih); - /* register sb interrupt callback functions */ - ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff, - (void *)brcms_c_wlintrsrestore, NULL, wlc); - /* turn off pll and xtal to match driver "down" state */ brcms_b_xtal(wlc_hw, OFF); @@ -4944,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, goto fail; } - BCMMSG(wlc->wiphy, - "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n", - wlc_hw->deviceid, wlc_hw->_nbands, - wlc_hw->sih->boardtype, macaddr); + BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n", + wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih), + macaddr); return err; @@ -5185,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc) * and per-port interrupt object may has been freed. this must * be done before sb core switch */ - ai_deregister_intr_callback(wlc_hw->sih); ai_pci_sleep(wlc_hw->sih); } @@ -5259,9 +5031,6 @@ static void brcms_c_ap_upd(struct brcms_c_info *wlc) { /* STA-BSS; short capable */ wlc->PLCPHdr_override = BRCMS_PLCP_SHORT; - - /* fixup mpc */ - wlc->mpc = true; } /* Initialize just the hardware when coming out of POR or S3/S5 system states */ @@ -5283,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw) ai_pci_fixcfg(wlc_hw->sih); /* + * TODO: test suspend/resume + * * AI chip doesn't restore bar0win2 on * hibernation/resume, need sw fixup */ - if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || - (wlc_hw->sih->chip == BCM43225_CHIP_ID)) - wlc_hw->regs = (struct d11regs __iomem *) - ai_setcore(wlc_hw->sih, D11_CORE_ID, 0); /* * Inform phy that a POR reset has occurred so @@ -5301,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw) wlc_hw->wlc->pub->hw_up = true; if ((wlc_hw->boardflags & BFL_FEM) - && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) { + && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { if (! 
(wlc_hw->boardrev >= 0x1250 && (wlc_hw->boardflags & BFL_FEM_BT))) @@ -5376,7 +5143,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc) if (!wlc->clk) return; - for (ac = 0; ac < AC_COUNT; ac++) + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac), wlc->wme_retries[ac]); } @@ -5396,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc) } if ((wlc->pub->boardflags & BFL_FEM) - && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) { + && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) { if (wlc->pub->boardrev >= 0x1250 && (wlc->pub->boardflags & BFL_FEM_BT)) brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL, @@ -5533,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw) } else { /* Reset and disable the core */ - if (ai_iscoreup(wlc_hw->sih)) { - if (R_REG(&wlc_hw->regs->maccontrol) & - MCTL_EN_MAC) + if (bcma_core_is_enabled(wlc_hw->d11core)) { + if (bcma_read32(wlc_hw->d11core, + D11REGOFFS(maccontrol)) & MCTL_EN_MAC) brcms_c_suspend_mac_and_wait(wlc_hw->wlc); callbacks += brcms_reset(wlc_hw->wlc->wl); brcms_c_coredisable(wlc_hw); @@ -5575,7 +5342,6 @@ uint brcms_c_down(struct brcms_c_info *wlc) if (!wlc->pub->up) return callbacks; - /* in between, mpc could try to bring down again.. */ wlc->going_down = true; callbacks += brcms_b_bmac_down_prep(wlc->hw); @@ -5852,7 +5618,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl) brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL); - for (ac = 0; ac < AC_COUNT; ac++) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SHORT, wlc->SRL); wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], @@ -6103,7 +5869,6 @@ void brcms_c_print_txdesc(struct d11txh *txh) u8 *rtsph = txh->RTSPhyHeader; struct ieee80211_rts rts = txh->rts_frame; - char hexbuf[256]; /* add plcp header along with txh descriptor */ printk(KERN_DEBUG "Raw TxDesc + plcp header:\n"); @@ -6124,17 +5889,16 @@ void brcms_c_print_txdesc(struct d11txh *txh) printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft); printk(KERN_DEBUG "\n"); - brcmu_format_hex(hexbuf, iv, sizeof(txh->IV)); - printk(KERN_DEBUG "SecIV: %s\n", hexbuf); - brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA)); - printk(KERN_DEBUG "RA: %s\n", hexbuf); + print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV)); + print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET, + ra, sizeof(txh->TxFrameRA)); printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb); - brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback)); - printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf); + print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET, + rtspfb, sizeof(txh->RTSPLCPFallback)); printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb); - brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback)); - printk(KERN_DEBUG "PLCP: %s ", hexbuf); + print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET, + fragpfb, sizeof(txh->FragPLCPFallback)); printk(KERN_DEBUG "DUR: %04x", fragdfb); printk(KERN_DEBUG "\n"); @@ -6149,18 +5913,18 @@ void brcms_c_print_txdesc(struct d11txh *txh) printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f); printk(KERN_DEBUG "MinByte: %04x\n", mmbyte); - brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader)); - printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf); - brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame)); - printk(KERN_DEBUG "RTS Frame: %s", hexbuf); + print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET, + rtsph, sizeof(txh->RTSPhyHeader)); + print_hex_dump_bytes("RTS Frame:", 
DUMP_PREFIX_OFFSET, + (u8 *)&rts, sizeof(txh->rts_frame)); printk(KERN_DEBUG "\n"); } #endif /* defined(BCMDBG) */ #if defined(BCMDBG) -int +static int brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf, - int len) + int len) { int i; char *p = buf; @@ -6916,7 +6680,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw, qos = ieee80211_is_data_qos(h->frame_control); /* compute length of frame in bytes for use in PLCP computations */ - len = brcmu_pkttotlen(p); + len = p->len; phylen = len + FCS_LEN; /* Get tx_info */ @@ -7695,11 +7459,11 @@ static void brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr, u32 *tsf_h_ptr) { - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; /* read the tsf timer low, then high to get an atomic read */ - *tsf_l_ptr = R_REG(®s->tsf_timerlow); - *tsf_h_ptr = R_REG(®s->tsf_timerhigh); + *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow)); + *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh)); } /* @@ -8253,12 +8017,6 @@ int brcms_c_get_tx_power(struct brcms_c_info *wlc) return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR); } -void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc) -{ - wlc->mpc = mpc; - brcms_c_radio_mpc_upd(wlc); -} - /* Process received frames */ /* * Return true if more frames need to be processed. false otherwise. @@ -8293,14 +8051,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p) len = p->len; if (rxh->RxStatus1 & RXS_FCSERR) { - if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) { - wiphy_err(wlc->wiphy, "FCSERR while scanning******* -" - " tossing\n"); + if (!(wlc->filter_flags & FIF_FCSFAIL)) goto toss; - } else { - wiphy_err(wlc->wiphy, "RCSERR!!!\n"); - goto toss; - } } /* check received pkt has at least frame control field */ @@ -8328,21 +8080,17 @@ static bool brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound) { struct sk_buff *p; - struct sk_buff *head = NULL; - struct sk_buff *tail = NULL; + struct sk_buff *next = NULL; + struct sk_buff_head recv_frames; + uint n = 0; uint bound_limit = bound ? RXBND : -1; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - /* gather received frames */ - while ((p = dma_rx(wlc_hw->di[fifo]))) { + skb_queue_head_init(&recv_frames); - if (!tail) - head = tail = p; - else { - tail->prev = p; - tail = p; - } + /* gather received frames */ + while (dma_rx(wlc_hw->di[fifo], &recv_frames)) { /* !give others some time to run! 
*/ if (++n >= bound_limit) @@ -8353,12 +8101,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound) dma_rxfill(wlc_hw->di[fifo]); /* process each frame */ - while ((p = head) != NULL) { + skb_queue_walk_safe(&recv_frames, p, next) { struct d11rxhdr_le *rxh_le; struct d11rxhdr *rxh; - head = head->prev; - p->prev = NULL; + skb_unlink(p, &recv_frames); rxh_le = (struct d11rxhdr_le *)p->data; rxh = (struct d11rxhdr *)p->data; @@ -8389,7 +8136,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) { u32 macintstatus; struct brcms_hardware *wlc_hw = wlc->hw; - struct d11regs __iomem *regs = wlc_hw->regs; + struct bcma_device *core = wlc_hw->d11core; struct wiphy *wiphy = wlc->wiphy; if (brcms_deviceremoved(wlc)) { @@ -8425,7 +8172,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) /* ATIM window end */ if (macintstatus & MI_ATIMWINEND) { BCMMSG(wlc->wiphy, "end of ATIM window\n"); - OR_REG(®s->maccommand, wlc->qvalid); + bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid); wlc->qvalid = 0; } @@ -8443,18 +8190,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) if (macintstatus & MI_GP0) { wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d " - "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); + "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n", - __func__, wlc_hw->sih->chip, - wlc_hw->sih->chiprev); - /* big hammer */ - brcms_init(wlc->wl); + __func__, ai_get_chip_id(wlc_hw->sih), + ai_get_chiprev(wlc_hw->sih)); + brcms_fatal_error(wlc_hw->wlc->wl); } /* gptimer timeout */ if (macintstatus & MI_TO) - W_REG(®s->gptimer, 0); + bcma_write32(core, D11REGOFFS(gptimer), 0); if (macintstatus & MI_RFDISABLE) { BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the" @@ -8470,20 +8216,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) return wlc->macintstatus != 0; fatal: - brcms_init(wlc->wl); + brcms_fatal_error(wlc_hw->wlc->wl); return wlc->macintstatus != 0; } -void brcms_c_init(struct brcms_c_info *wlc) +void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) { - struct d11regs __iomem *regs; + struct bcma_device *core = wlc->hw->d11core; u16 chanspec; - bool mute = false; BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); - regs = wlc->regs; - /* * This will happen if a big-hammer was executed. 
In * that case, we want to go back to the channel that @@ -8494,7 +8237,7 @@ void brcms_c_init(struct brcms_c_info *wlc) else chanspec = brcms_c_init_chanspec(wlc); - brcms_b_init(wlc->hw, chanspec, mute); + brcms_b_init(wlc->hw, chanspec); /* update beacon listen interval */ brcms_c_bcn_li_upd(wlc); @@ -8513,8 +8256,8 @@ void brcms_c_init(struct brcms_c_info *wlc) * update since init path would reset * to default value */ - W_REG(®s->tsf_cfprep, - (bi << CFPREP_CBI_SHIFT)); + bcma_write32(core, D11REGOFFS(tsf_cfprep), + bi << CFPREP_CBI_SHIFT); /* Update maccontrol PM related bits */ brcms_c_set_ps_ctrl(wlc); @@ -8544,7 +8287,7 @@ void brcms_c_init(struct brcms_c_info *wlc) brcms_c_bsinit(wlc); /* Enable EDCF mode (while the MAC is suspended) */ - OR_REG(®s->ifs_ctl, IFS_USEEDCF); + bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF); brcms_c_edcf_setparams(wlc, false); /* Init precedence maps for empty FIFOs */ @@ -8560,14 +8303,15 @@ void brcms_c_init(struct brcms_c_info *wlc) /* ..now really unleash hell (allow the MAC out of suspend) */ brcms_c_enable_mac(wlc); + /* suspend the tx fifos and mute the phy for preism cac time */ + if (mute_tx) + brcms_b_mute(wlc->hw, true); + /* clear tx flow control */ brcms_c_txflowcontrol_reset(wlc); /* enable the RF Disable Delay timer */ - W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT); - - /* initialize mpc delay */ - wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT; + bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT); /* * Initialize WME parameters; if they haven't been set by some other @@ -8577,7 +8321,7 @@ void brcms_c_init(struct brcms_c_info *wlc) /* Uninitialized; read from HW */ int ac; - for (ac = 0; ac < AC_COUNT; ac++) + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) wlc->wme_retries[ac] = brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac)); } @@ -8587,9 +8331,8 @@ void brcms_c_init(struct brcms_c_info *wlc) * The common driver entry routine. 
Error codes should be unique */ struct brcms_c_info * -brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, - bool piomode, void __iomem *regsva, struct pci_dev *btparam, - uint *perr) +brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, + bool piomode, uint *perr) { struct brcms_c_info *wlc; uint err = 0; @@ -8597,7 +8340,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, struct brcms_pub *pub; /* allocate struct brcms_c_info state and its substructures */ - wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device); + wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0); if (wlc == NULL) goto fail; wlc->wiphy = wl->wiphy; @@ -8624,8 +8367,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, * low level attach steps(all hw accesses go * inside, no more in rest of the attach) */ - err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva, - btparam); + err = brcms_b_attach(wlc, core, unit, piomode); if (err) goto fail; @@ -8754,8 +8496,6 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, brcms_c_ht_update_sgi_rx(wlc, 0); } - /* initialize radio_mpc_disable according to wlc->mpc */ - brcms_c_radio_mpc_upd(wlc); brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail); if (perr) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h index c0e0fcfdfaf..adb136ec1f0 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h @@ -44,8 +44,6 @@ /* transmit buffer max headroom for protocol headers */ #define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN) -#define AC_COUNT 4 - /* Macros for doing definition and get/set of bitfields * Usage example, e.g. a three-bit field (bits 4-6): * #define <NAME>_M BITFIELD_MASK(3) @@ -336,7 +334,7 @@ struct brcms_hardware { u32 machwcap_backup; /* backup of machwcap */ struct si_pub *sih; /* SI handle (cookie for siutils calls) */ - struct d11regs __iomem *regs; /* pointer to device registers */ + struct bcma_device *d11core; /* pointer to 802.11 core */ struct phy_shim_info *physhim; /* phy shim layer handler */ struct shared_phy *phy_sh; /* pointer to shared phy state */ struct brcms_hw_band *band;/* pointer to active per-band state */ @@ -402,7 +400,6 @@ struct brcms_txq_info { * * pub: pointer to driver public state. * wl: pointer to specific private state. - * regs: pointer to device registers. * hw: HW related state. * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1. * fastpwrup_dly: time in us needed to bring up d11 fast clock. @@ -427,11 +424,6 @@ struct brcms_txq_info { * bandinit_pending: track band init in auto band. * radio_monitor: radio timer is running. * going_down: down path intermediate variable. - * mpc: enable minimum power consumption. - * mpc_dlycnt: # of watchdog cnt before turn disable radio. - * mpc_offcnt: # of watchdog cnt that radio is disabled. - * mpc_delay_off: delay radio disable by # of watchdog cnt. - * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc. * wdtimer: timer for watchdog routine. * radio_timer: timer for hw radio button monitor routine. * monitor: monitor (MPDU sniffing) mode. @@ -441,7 +433,7 @@ struct brcms_txq_info { * bcn_li_dtim: beacon listen interval in # dtims. * WDarmed: watchdog timer is armed. * WDlast: last time wlc_watchdog() was called. - * edcf_txop[AC_COUNT]: current txop for each ac. + * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac. 
* wme_retries: per-AC retry limits. * tx_prec_map: Precedence map based on HW FIFO space. * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME. @@ -484,7 +476,6 @@ struct brcms_txq_info { struct brcms_c_info { struct brcms_pub *pub; struct brcms_info *wl; - struct d11regs __iomem *regs; struct brcms_hardware *hw; /* clock */ @@ -522,18 +513,11 @@ struct brcms_c_info { bool radio_monitor; bool going_down; - bool mpc; - u8 mpc_dlycnt; - u8 mpc_offcnt; - u8 mpc_delay_off; - u8 prev_non_delay_mpc; - struct brcms_timer *wdtimer; struct brcms_timer *radio_timer; /* promiscuous */ - bool monitor; - bool bcnmisc_monitor; + uint filter_flags; /* driver feature */ bool _rifs; @@ -546,9 +530,9 @@ struct brcms_c_info { u32 WDlast; /* WME */ - u16 edcf_txop[AC_COUNT]; + u16 edcf_txop[IEEE80211_NUM_ACS]; - u16 wme_retries[AC_COUNT]; + u16 wme_retries[IEEE80211_NUM_ACS]; u16 tx_prec_map; u16 fifo2prec_map[NFIFO]; @@ -671,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh); #endif extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config); -extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, - bool promisc); +extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags); extern void brcms_c_send_q(struct brcms_c_info *wlc); extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifo); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c index 0bcb2679204..7fad6dc1925 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c @@ -139,6 +139,9 @@ #define SRSH_PI_MASK 0xf000 /* bit 15:12 */ #define SRSH_PI_SHIFT 12 /* bit 15:12 */ +#define PCIREGOFFS(field) offsetof(struct sbpciregs, field) +#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field) + /* Sonics side: PCI core and host control registers */ struct sbpciregs { u32 control; /* PCI control */ @@ -205,11 +208,7 @@ struct sbpcieregs { }; struct pcicore_info { - union { - struct sbpcieregs __iomem *pcieregs; - struct sbpciregs __iomem *pciregs; - } regs; /* Memory mapped register to the core */ - + struct bcma_device *core; struct si_pub *sih; /* System interconnect handle */ struct pci_dev *dev; u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset @@ -224,9 +223,9 @@ struct pcicore_info { }; #define PCIE_ASPM(sih) \ - (((sih)->buscoretype == PCIE_CORE_ID) && \ - (((sih)->buscorerev >= 3) && \ - ((sih)->buscorerev <= 5))) + ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \ + ((ai_get_buscorerev(sih) >= 3) && \ + (ai_get_buscorerev(sih) <= 5))) /* delay needed between the mdio control/ mdiodata register data access */ @@ -238,8 +237,7 @@ static void pr28829_delay(void) /* Initialize the PCI core. 
* It's caller's responsibility to make sure that this is done only once */ -struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev, - void __iomem *regs) +struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core) { struct pcicore_info *pi; @@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev, return NULL; pi->sih = sih; - pi->dev = pdev; + pi->dev = core->bus->host_pci; + pi->core = core; - if (sih->buscoretype == PCIE_CORE_ID) { + if (core->id.id == PCIE_CORE_ID) { u8 cap_ptr; - pi->regs.pcieregs = regs; cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP, NULL, NULL); pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET; - } else - pi->regs.pciregs = regs; - + } return pi; } @@ -334,37 +330,37 @@ end: /* ***** Register Access API */ static uint -pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset) +pcie_readreg(struct bcma_device *core, uint addrtype, uint offset) { uint retval = 0xFFFFFFFF; switch (addrtype) { case PCIE_CONFIGREGS: - W_REG(&pcieregs->configaddr, offset); - (void)R_REG((&pcieregs->configaddr)); - retval = R_REG(&pcieregs->configdata); + bcma_write32(core, PCIEREGOFFS(configaddr), offset); + (void)bcma_read32(core, PCIEREGOFFS(configaddr)); + retval = bcma_read32(core, PCIEREGOFFS(configdata)); break; case PCIE_PCIEREGS: - W_REG(&pcieregs->pcieindaddr, offset); - (void)R_REG(&pcieregs->pcieindaddr); - retval = R_REG(&pcieregs->pcieinddata); + bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset); + (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr)); + retval = bcma_read32(core, PCIEREGOFFS(pcieinddata)); break; } return retval; } -static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype, +static uint pcie_writereg(struct bcma_device *core, uint addrtype, uint offset, uint val) { switch (addrtype) { case PCIE_CONFIGREGS: - W_REG((&pcieregs->configaddr), offset); - W_REG((&pcieregs->configdata), val); + bcma_write32(core, PCIEREGOFFS(configaddr), offset); + bcma_write32(core, PCIEREGOFFS(configdata), val); break; case PCIE_PCIEREGS: - W_REG((&pcieregs->pcieindaddr), offset); - W_REG((&pcieregs->pcieinddata), val); + bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset); + bcma_write32(core, PCIEREGOFFS(pcieinddata), val); break; default: break; @@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype, static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk) { - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; uint mdiodata, i = 0; uint pcie_serdes_spinwait = 200; @@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk) (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) | (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | (blk << 4)); - W_REG(&pcieregs->mdiodata, mdiodata); + bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata); pr28829_delay(); /* retry till the transaction is complete */ while (i < pcie_serdes_spinwait) { - if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) + if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) & + MDIOCTL_ACCESS_DONE) break; udelay(1000); @@ -404,15 +400,15 @@ static int pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, uint *val) { - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; uint mdiodata; uint i = 0; uint pcie_serdes_spinwait = 10; /* enable mdio access to SERDES */ - W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL); + bcma_write32(pi->core, 
PCIEREGOFFS(mdiocontrol), + MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL); - if (pi->sih->buscorerev >= 10) { + if (ai_get_buscorerev(pi->sih) >= 10) { /* new serdes is slower in rw, * using two layers of reg address mapping */ @@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val); - W_REG(&pcieregs->mdiodata, mdiodata); + bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata); pr28829_delay(); /* retry till the transaction is complete */ while (i < pcie_serdes_spinwait) { - if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) { + if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) & + MDIOCTL_ACCESS_DONE) { if (!write) { pr28829_delay(); - *val = (R_REG(&pcieregs->mdiodata) & + *val = (bcma_read32(pi->core, + PCIEREGOFFS(mdiodata)) & MDIODATA_MASK); } /* Disable mdio access to SERDES */ - W_REG(&pcieregs->mdiocontrol, 0); + bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0); return 0; } udelay(1000); @@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, } /* Timed out. Disable mdio access to SERDES. */ - W_REG(&pcieregs->mdiocontrol, 0); + bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0); return 1; } @@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend) { u32 w; struct si_pub *sih = pi->sih; - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; - if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7) + if (ai_get_buscoretype(sih) != PCIE_CORE_ID || + ai_get_buscorerev(sih) < 7) return; - w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); + w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); if (extend) w |= PCIE_ASPMTIMER_EXTEND; else w &= ~PCIE_ASPMTIMER_EXTEND; - pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); - w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); + pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); + w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); } /* centralized clkreq control policy */ @@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state) pcie_clkreq(pi, 1, 0); break; case SI_PCIDOWN: - if (sih->buscorerev == 6) { /* turn on serdes PLL down */ - ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_addr), - ~0, 0); - ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_data), - ~0x40, 0); + /* turn on serdes PLL down */ + if (ai_get_buscorerev(sih) == 6) { + ai_cc_reg(sih, + offsetof(struct chipcregs, chipcontrol_addr), + ~0, 0); + ai_cc_reg(sih, + offsetof(struct chipcregs, chipcontrol_data), + ~0x40, 0); } else if (pi->pcie_pr42767) { pcie_clkreq(pi, 1, 1); } break; case SI_PCIUP: - if (sih->buscorerev == 6) { /* turn off serdes PLL down */ - ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_addr), - ~0, 0); - ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_data), - ~0x40, 0x40); + /* turn off serdes PLL down */ + if (ai_get_buscorerev(sih) == 6) { + ai_cc_reg(sih, + offsetof(struct chipcregs, chipcontrol_addr), + ~0, 0); + ai_cc_reg(sih, + offsetof(struct chipcregs, chipcontrol_data), + ~0x40, 0x40); } else if (PCIE_ASPM(sih)) { /* disable clkreq */ pcie_clkreq(pi, 1, 0); } @@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi) if (pi->pcie_polarity != 0) return; - w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG); + w = 
pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG); /* Detect the current polarity at attach and force that polarity and * disable changing the polarity @@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi) */ static void pcie_war_aspm_clkreq(struct pcicore_info *pi) { - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; struct si_pub *sih = pi->sih; u16 val16; - u16 __iomem *reg16; u32 w; if (!PCIE_ASPM(sih)) return; /* bypass this on QT or VSIM */ - reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET]; - val16 = R_REG(reg16); + val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET])); val16 &= ~SRSH_ASPM_ENB; if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB) @@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi) else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB) val16 |= SRSH_ASPM_L0s_ENB; - W_REG(reg16, val16); + bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16); pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w); w &= ~PCIE_ASPM_ENAB; w |= pi->pcie_war_aspm_ovr; pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w); - reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5]; - val16 = R_REG(reg16); + val16 = bcma_read16(pi->core, + PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5])); if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) { val16 |= SRSH_CLKREQ_ENB; @@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi) } else val16 &= ~SRSH_CLKREQ_ENB; - W_REG(reg16, val16); + bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]), + val16); } /* Apply the polarity determined at the start */ @@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi) /* Needs to happen when coming out of 'standby'/'hibernate' */ static void pcie_misc_config_fixup(struct pcicore_info *pi) { - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; u16 val16; - u16 __iomem *reg16; - reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG]; - val16 = R_REG(reg16); + val16 = bcma_read16(pi->core, + PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG])); if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) { val16 |= SRSH_L23READY_EXIT_NOPERST; - W_REG(reg16, val16); + bcma_write16(pi->core, + PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16); } } @@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi) /* Needs to happen when coming out of 'standby'/'hibernate' */ static void pcie_war_noplldown(struct pcicore_info *pi) { - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; - u16 __iomem *reg16; - /* turn off serdes PLL down */ - ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol), - CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN); + ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol), + CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN); /* clear srom shadow backdoor */ - reg16 = &pcieregs->sprom[SRSH_BD_OFFSET]; - W_REG(reg16, 0); + bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0); } /* Needs to happen when coming out of 'standby'/'hibernate' */ static void pcie_war_pci_setup(struct pcicore_info *pi) { struct si_pub *sih = pi->sih; - struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; u32 w; - if (sih->buscorerev == 0 || sih->buscorerev == 1) { - w = pcie_readreg(pcieregs, PCIE_PCIEREGS, + if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) { + w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG); w |= 0x8; - pcie_writereg(pcieregs, PCIE_PCIEREGS, + pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG, 
w); } - if (sih->buscorerev == 1) { - w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG); + if (ai_get_buscorerev(sih) == 1) { + w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG); w |= 0x40; - pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w); + pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w); } - if (sih->buscorerev == 0) { + if (ai_get_buscorerev(sih) == 0) { pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128); pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100); pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466); } else if (PCIE_ASPM(sih)) { /* Change the L1 threshold for better performance */ - w = pcie_readreg(pcieregs, PCIE_PCIEREGS, + w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); w &= ~PCIE_L1THRESHOLDTIME_MASK; w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT; - pcie_writereg(pcieregs, PCIE_PCIEREGS, + pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); pcie_war_serdes(pi); pcie_war_aspm_clkreq(pi); - } else if (pi->sih->buscorerev == 7) + } else if (ai_get_buscorerev(pi->sih) == 7) pcie_war_noplldown(pi); /* Note that the fix is actually in the SROM, * that's why this is open-ended */ - if (pi->sih->buscorerev >= 6) + if (ai_get_buscorerev(pi->sih) >= 6) pcie_misc_config_fixup(pi); } @@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state) void pcicore_hwup(struct pcicore_info *pi) { - if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) + if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID) return; pcie_war_pci_setup(pi); @@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi) void pcicore_up(struct pcicore_info *pi, int state) { - if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) + if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID) return; /* Restore L1 timer for better performance */ @@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi) void pcicore_down(struct pcicore_info *pi, int state) { - if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) + if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID) return; pcie_clkreq_upd(pi, state); @@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state) pcie_extendL1timer(pi, false); } -/* precondition: current core is sii->buscoretype */ -static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16) +void pcicore_fixcfg(struct pcicore_info *pi) { - struct si_info *sii = (struct si_info *)(pi->sih); + struct bcma_device *core = pi->core; u16 val16; - uint pciidx; + uint regoff; - pciidx = ai_coreidx(&sii->pub); - val16 = R_REG(reg16); - if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) { - val16 = (u16)(pciidx << SRSH_PI_SHIFT) | - (val16 & ~SRSH_PI_MASK); - W_REG(reg16, val16); - } -} + switch (pi->core->id.id) { + case BCMA_CORE_PCI: + regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]); + break; -void -pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs) -{ - pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]); -} + case BCMA_CORE_PCIE: + regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]); + break; -void pcicore_fixcfg_pcie(struct pcicore_info *pi, - struct sbpcieregs __iomem *pcieregs) -{ - pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]); + default: + return; + } + + val16 = bcma_read16(pi->core, regoff); + if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != + (u16)core->core_index) { + val16 = ((u16)core->core_index << SRSH_PI_SHIFT) | + (val16 & ~SRSH_PI_MASK); + bcma_write16(pi->core, regoff, val16); + } } /* precondition: current core is 
pci core */ void -pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs) +pcicore_pci_setup(struct pcicore_info *pi) { - u32 w; - - OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST); - - if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) { - OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI); - w = R_REG(&pciregs->clkrun); - W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL); - w = R_REG(&pciregs->clkrun); + bcma_set32(pi->core, PCIREGOFFS(sbtopci2), + SBTOPCI_PREF | SBTOPCI_BURST); + + if (pi->core->id.rev >= 11) { + bcma_set32(pi->core, PCIREGOFFS(sbtopci2), + SBTOPCI_RC_READMULTI); + bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL); + (void)bcma_read32(pi->core, PCIREGOFFS(clkrun)); } } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h index 58aa80dc332..9fc3ead540a 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h @@ -62,8 +62,7 @@ struct sbpciregs; struct sbpcieregs; extern struct pcicore_info *pcicore_init(struct si_pub *sih, - struct pci_dev *pdev, - void __iomem *regs); + struct bcma_device *core); extern void pcicore_deinit(struct pcicore_info *pch); extern void pcicore_attach(struct pcicore_info *pch, int state); extern void pcicore_hwup(struct pcicore_info *pch); @@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch); extern void pcicore_down(struct pcicore_info *pch, int state); extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id, unsigned char *buf, u32 *buflen); -extern void pcicore_fixcfg_pci(struct pcicore_info *pch, - struct sbpciregs __iomem *pciregs); -extern void pcicore_fixcfg_pcie(struct pcicore_info *pch, - struct sbpcieregs __iomem *pciregs); -extern void pcicore_pci_setup(struct pcicore_info *pch, - struct sbpciregs __iomem *pciregs); +extern void pcicore_fixcfg(struct pcicore_info *pch); +extern void pcicore_pci_setup(struct pcicore_info *pch); #endif /* _BRCM_NICPCI_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c index edf551561fd..f1ca1262586 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c @@ -77,7 +77,7 @@ struct otp_fn_s { }; struct otpinfo { - uint ccrev; /* chipc revision */ + struct bcma_device *core; /* chipc core */ const struct otp_fn_s *fn; /* OTP functions */ struct si_pub *sih; /* Saved sb handle */ @@ -133,9 +133,10 @@ struct otpinfo { #define OTP_SZ_FU_144 (144/8) /* 144 bits */ static u16 -ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn) +ipxotp_otpr(struct otpinfo *oi, uint wn) { - return R_REG(&cc->sromotp[wn]); + return bcma_read16(oi->core, + CHIPCREGOFFS(sromotp[wn])); } /* @@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew) { int ret = 0; - switch (sih->chip) { + switch (ai_get_chip_id(sih)) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM; @@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew) return ret; } -static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) +static void _ipxotp_init(struct otpinfo *oi) { uint k; u32 otpp, st; + int ccrev = ai_get_ccrev(oi->sih); + /* * record word offset of General Use Region * for various chipcommon revs */ - if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24 - || oi->sih->ccrev == 27) { + if (ccrev == 21 || ccrev == 24 + || ccrev == 
27) { oi->otpgu_base = REVA4_OTPGU_BASE; - } else if (oi->sih->ccrev == 36) { + } else if (ccrev == 36) { /* * OTP size greater than equal to 2KB (128 words), * otpgu_base is similar to rev23 @@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) oi->otpgu_base = REVB8_OTPGU_BASE; else oi->otpgu_base = REV36_OTPGU_BASE; - } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) { + } else if (ccrev == 23 || ccrev >= 25) { oi->otpgu_base = REVB8_OTPGU_BASE; } @@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) otpp = OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK); - W_REG(&cc->otpprog, otpp); - for (k = 0; - ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY) - && (k < OTPP_TRIES); k++) - ; + bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp); + st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog)); + for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++) + st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog)); if (k >= OTPP_TRIES) return; /* Read OTP lock bits and subregion programmed indication bits */ - oi->status = R_REG(&cc->otpstatus); + oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus)); - if ((oi->sih->chip == BCM43224_CHIP_ID) - || (oi->sih->chip == BCM43225_CHIP_ID)) { + if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID) + || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) { u32 p_bits; - p_bits = - (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) & - OTPGU_P_MSK) - >> OTPGU_P_SHIFT; + p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) & + OTPGU_P_MSK) >> OTPGU_P_SHIFT; oi->status |= (p_bits << OTPS_GUP_SHIFT); } @@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) oi->hwlim = oi->wsize; if (oi->status & OTPS_GUP_HW) { oi->hwlim = - ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16; + ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16; oi->swbase = oi->hwlim; } else oi->swbase = oi->hwbase; @@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) if (oi->status & OTPS_GUP_SW) { oi->swlim = - ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16; + ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16; oi->fbase = oi->swlim; } else oi->fbase = oi->swbase; @@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) { - uint idx; - struct chipcregs __iomem *cc; - /* Make sure we're running IPX OTP */ - if (!OTPTYPE_IPX(sih->ccrev)) + if (!OTPTYPE_IPX(ai_get_ccrev(sih))) return -EBADE; /* Make sure OTP is not disabled */ @@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) return -EBADE; /* Check for otp size */ - switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) { + switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) { case 0: /* Nothing there */ return -EBADE; @@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) } /* Retrieve OTP region info */ - idx = ai_coreidx(sih); - cc = ai_setcoreidx(sih, SI_CC_IDX); - - _ipxotp_init(oi, cc); - - ai_setcoreidx(sih, idx); - + _ipxotp_init(oi); return 0; } static int ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen) { - uint idx; - struct chipcregs __iomem *cc; uint base, i, sz; /* Validate region selection */ @@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen) return -EINVAL; } - idx = 
ai_coreidx(oi->sih); - cc = ai_setcoreidx(oi->sih, SI_CC_IDX); - /* Read the data */ for (i = 0; i < sz; i++) - data[i] = ipxotp_otpr(oi, cc, base + i); + data[i] = ipxotp_otpr(oi, base + i); - ai_setcoreidx(oi->sih, idx); *wlen = sz; return 0; } @@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = { static int otp_init(struct si_pub *sih, struct otpinfo *oi) { - int ret; memset(oi, 0, sizeof(struct otpinfo)); - oi->ccrev = sih->ccrev; + oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - if (OTPTYPE_IPX(oi->ccrev)) + if (OTPTYPE_IPX(ai_get_ccrev(sih))) oi->fn = &ipxotp_fn; if (oi->fn == NULL) @@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi) oi->sih = sih; - ret = (oi->fn->init) (sih, oi); + ret = (oi->fn->init)(sih, oi); return ret; } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c index a3149254cbc..264f8c4c703 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c @@ -109,10 +109,10 @@ static const struct chan_info_basic chan_info_all[] = { {204, 5020}, {208, 5040}, {212, 5060}, - {216, 50800} + {216, 5080} }; -const u8 ofdm_rate_lookup[] = { +static const u8 ofdm_rate_lookup[] = { BRCM_RATE_48M, BRCM_RATE_24M, @@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih) void wlc_radioreg_exit(struct brcms_phy_pub *pih) { struct brcms_phy *pi = (struct brcms_phy *) pih; - u16 dummy; - dummy = R_REG(&pi->regs->phyversion); + (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); pi->phy_wreg = 0; wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0); } @@ -186,19 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr) if ((D11REV_GE(pi->sh->corerev, 24)) || (D11REV_IS(pi->sh->corerev, 22) && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { - W_REG_FLUSH(&pi->regs->radioregaddr, addr); - data = R_REG(&pi->regs->radioregdata); + bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr); + data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); } else { - W_REG_FLUSH(&pi->regs->phy4waddr, addr); - -#ifdef __ARM_ARCH_4T__ - __asm__(" .align 4 "); - __asm__(" nop "); - data = R_REG(&pi->regs->phy4wdatalo); -#else - data = R_REG(&pi->regs->phy4wdatalo); -#endif - + bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr); + data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo)); } pi->phy_wreg = 0; @@ -211,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val) (D11REV_IS(pi->sh->corerev, 22) && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { - W_REG_FLUSH(&pi->regs->radioregaddr, addr); - W_REG(&pi->regs->radioregdata, val); + bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr); + bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val); } else { - W_REG_FLUSH(&pi->regs->phy4waddr, addr); - W_REG(&pi->regs->phy4wdatalo, val); + bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr); + bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val); } if (++pi->phy_wreg >= pi->phy_wreg_limit) { - (void)R_REG(&pi->regs->maccontrol); + (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); pi->phy_wreg = 0; } } @@ -231,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi) if (D11REV_GE(pi->sh->corerev, 24)) { u32 b0, b1, b2; - W_REG_FLUSH(&pi->regs->radioregaddr, 0); - b0 = (u32) R_REG(&pi->regs->radioregdata); - W_REG_FLUSH(&pi->regs->radioregaddr, 1); - b1 = (u32) R_REG(&pi->regs->radioregdata); - W_REG_FLUSH(&pi->regs->radioregaddr, 2); - b2 = (u32) 
R_REG(&pi->regs->radioregdata); + bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0); + b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); + bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1); + b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); + bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2); + b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4) & 0xf); } else { - W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE); - id = (u32) R_REG(&pi->regs->phy4wdatalo); - id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16; + bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE); + id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo)); + id |= (u32) bcma_read16(pi->d11core, + D11REGOFFS(phy4wdatahi)) << 16; } pi->phy_wreg = 0; return id; @@ -283,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val) void write_phy_channel_reg(struct brcms_phy *pi, uint val) { - W_REG(&pi->regs->phychannel, val); + bcma_write16(pi->d11core, D11REGOFFS(phychannel), val); } u16 read_phy_reg(struct brcms_phy *pi, u16 addr) { - struct d11regs __iomem *regs; - - regs = pi->regs; - - W_REG_FLUSH(®s->phyregaddr, addr); + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); pi->phy_wreg = 0; - return R_REG(®s->phyregdata); + return bcma_read16(pi->d11core, D11REGOFFS(phyregdata)); } void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) { - struct d11regs __iomem *regs; - - regs = pi->regs; - #ifdef CONFIG_BCM47XX - W_REG_FLUSH(®s->phyregaddr, addr); - W_REG(®s->phyregdata, val); + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); + bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val); if (addr == 0x72) - (void)R_REG(®s->phyregdata); + (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); #else - W_REG((u32 __iomem *)(®s->phyregaddr), addr | (val << 16)); + bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16)); if (++pi->phy_wreg >= pi->phy_wreg_limit) { pi->phy_wreg = 0; - (void)R_REG(®s->phyversion); + (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); } #endif } void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) { - struct d11regs __iomem *regs; - - regs = pi->regs; - - W_REG_FLUSH(®s->phyregaddr, addr); - - W_REG(®s->phyregdata, (R_REG(®s->phyregdata) & val)); + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); + bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val); pi->phy_wreg = 0; } void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) { - struct d11regs __iomem *regs; - - regs = pi->regs; - - W_REG_FLUSH(®s->phyregaddr, addr); - - W_REG(®s->phyregdata, (R_REG(®s->phyregdata) | val)); + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); + bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val); pi->phy_wreg = 0; } void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val) { - struct d11regs __iomem *regs; - - regs = pi->regs; - - W_REG_FLUSH(®s->phyregaddr, addr); - - W_REG(®s->phyregdata, - ((R_REG(®s->phyregdata) & ~mask) | (val & mask))); + val &= mask; + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); + bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val); pi->phy_wreg = 0; } @@ -412,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp) sh->sromrev = shp->sromrev; sh->boardtype = shp->boardtype; sh->boardrev = shp->boardrev; - sh->boardvendor = shp->boardvendor; sh->boardflags = shp->boardflags; sh->boardflags2 = shp->boardflags2; - 
sh->buscorerev = shp->buscorerev; sh->fast_timer = PHY_SW_TIMER_FAST; sh->slow_timer = PHY_SW_TIMER_SLOW; @@ -458,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi) } struct brcms_phy_pub * -wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, +wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, int bandtype, struct wiphy *wiphy) { struct brcms_phy *pi; @@ -470,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, if (D11REV_IS(sh->corerev, 4)) sflags = SISF_2G_PHY | SISF_5G_PHY; else - sflags = ai_core_sflags(sh->sih, 0, 0); + sflags = bcma_aread32(d11core, BCMA_IOST); if (bandtype == BRCM_BAND_5G) { if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0) @@ -488,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, if (pi == NULL) return NULL; pi->wiphy = wiphy; - pi->regs = regs; + pi->d11core = d11core; pi->sh = sh; pi->phy_init_por = true; pi->phy_wreg_limit = PHY_WREG_LIMIT; @@ -503,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, pi->pubpi.coreflags = SICF_GMODE; wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); - phyversion = R_REG(&pi->regs->phyversion); + phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion)); pi->pubpi.phy_type = PHY_TYPE(phyversion); pi->pubpi.phy_rev = phyversion & PV_PV_MASK; @@ -515,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, pi->pubpi.phy_corenum = PHY_CORE_NUM_2; pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT; - if (!pi->pubpi.phy_type == PHY_TYPE_N && - !pi->pubpi.phy_type == PHY_TYPE_LCN) + if (pi->pubpi.phy_type != PHY_TYPE_N && + pi->pubpi.phy_type != PHY_TYPE_LCN) goto err; if (bandtype == BRCM_BAND_5G) { @@ -787,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec) pi->radio_chanspec = chanspec; - mc = R_REG(&pi->regs->maccontrol); + mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init")) return; if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN)) pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC; - if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA), + if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA), "HW error SISF_FCLKA\n")) return; @@ -833,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih) struct brcms_phy *pi = (struct brcms_phy *) pih; void (*cal_init)(struct brcms_phy *) = NULL; - if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0, - "HW error: MAC enabled during phy cal\n")) + if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n")) return; if (!pi->initialized) { @@ -1025,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi, void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) { #define DUMMY_PKT_LEN 20 - struct d11regs __iomem *regs = pi->regs; + struct bcma_device *core = pi->d11core; int i, count; u8 ofdmpkt[DUMMY_PKT_LEN] = { 0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, @@ -1041,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN, dummypkt); - W_REG(®s->xmtsel, 0); + bcma_write16(core, D11REGOFFS(xmtsel), 0); if (D11REV_GE(pi->sh->corerev, 11)) - W_REG(®s->wepctl, 0x100); + bcma_write16(core, D11REGOFFS(wepctl), 0x100); else - W_REG(®s->wepctl, 0); + bcma_write16(core, D11REGOFFS(wepctl), 0); - W_REG(®s->txe_phyctl, (ofdm ? 
1 : 0) | PHY_TXC_ANT_0); + bcma_write16(core, D11REGOFFS(txe_phyctl), + (ofdm ? 1 : 0) | PHY_TXC_ANT_0); if (ISNPHY(pi) || ISLCNPHY(pi)) - W_REG(®s->txe_phyctl1, 0x1A02); + bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02); - W_REG(®s->txe_wm_0, 0); - W_REG(®s->txe_wm_1, 0); + bcma_write16(core, D11REGOFFS(txe_wm_0), 0); + bcma_write16(core, D11REGOFFS(txe_wm_1), 0); - W_REG(®s->xmttplatetxptr, 0); - W_REG(®s->xmttxcnt, DUMMY_PKT_LEN); + bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0); + bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN); - W_REG(®s->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2)); + bcma_write16(core, D11REGOFFS(xmtsel), + ((8 << 8) | (1 << 5) | (1 << 2) | 2)); - W_REG(®s->txe_ctl, 0); + bcma_write16(core, D11REGOFFS(txe_ctl), 0); if (!pa_on) { if (ISNPHY(pi)) @@ -1068,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) } if (ISNPHY(pi) || ISLCNPHY(pi)) - W_REG(®s->txe_aux, 0xD0); + bcma_write16(core, D11REGOFFS(txe_aux), 0xD0); else - W_REG(®s->txe_aux, ((1 << 5) | (1 << 4))); + bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4))); - (void)R_REG(®s->txe_aux); + (void)bcma_read16(core, D11REGOFFS(txe_aux)); i = 0; count = ofdm ? 30 : 250; while ((i++ < count) - && (R_REG(®s->txe_status) & (1 << 7))) + && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7))) udelay(10); i = 0; - while ((i++ < 10) - && ((R_REG(®s->txe_status) & (1 << 10)) == 0)) + while ((i++ < 10) && + ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0)) udelay(10); i = 0; - while ((i++ < 10) && ((R_REG(®s->ifsstat) & (1 << 8)))) + while ((i++ < 10) && + ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8)))) udelay(10); if (!pa_on) { @@ -1145,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi) void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on) { struct brcms_phy *pi = (struct brcms_phy *) pih; - (void)R_REG(&pi->regs->maccontrol); + (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); if (ISNPHY(pi)) { wlc_phy_switch_radio_nphy(pi, on); @@ -1385,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi, memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM], &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM); - if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) + if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC) mac_enabled = true; if (mac_enabled) @@ -1415,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override) if (!SCAN_INPROG_PHY(pi)) { bool suspend; - suspend = (0 == (R_REG(&pi->regs->maccontrol) & + suspend = (0 == (bcma_read32(pi->d11core, + D11REGOFFS(maccontrol)) & MCTL_EN_MAC)); if (!suspend) @@ -1868,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end) if (NREV_IS(pi->pubpi.phy_rev, 3) || NREV_IS(pi->pubpi.phy_rev, 4)) { - W_REG(&pi->regs->phyregaddr, 0xa0); - (void)R_REG(&pi->regs->phyregaddr); - rxc = R_REG(&pi->regs->phyregdata); - W_REG(&pi->regs->phyregdata, - (0x1 << 15) | rxc); + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), + 0xa0); + bcma_set16(pi->d11core, D11REGOFFS(phyregdata), + 0x1 << 15); } } else { if (NREV_IS(pi->pubpi.phy_rev, 3) || NREV_IS(pi->pubpi.phy_rev, 4)) { - W_REG(&pi->regs->phyregaddr, 0xa0); - (void)R_REG(&pi->regs->phyregaddr); - W_REG(&pi->regs->phyregdata, rxc); + bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), + 0xa0); + bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc); } wlc_phy_por_inform(ppi); @@ -1999,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct 
brcms_phy_pub *ppi, bool hwpwrctrl) pi->txpwrctrl = hwpwrctrl; if (ISNPHY(pi)) { - suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, + D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -2201,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val) if (!pi->sh->clk) return; - suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -2419,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch) wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); - OR_REG(&pi->regs->maccommand, - MCMD_BG_NOISE); + bcma_set32(pi->d11core, D11REGOFFS(maccommand), + MCMD_BG_NOISE); } else { wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_deaf_mode(pi, (bool) 0); @@ -2438,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch) wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); - OR_REG(&pi->regs->maccommand, - MCMD_BG_NOISE); + bcma_set32(pi->d11core, D11REGOFFS(maccommand), + MCMD_BG_NOISE); } else { struct phy_iq_est est[PHY_CORE_MAX]; u32 cmplx_pwr[PHY_CORE_MAX]; @@ -2932,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode) mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2); } - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, gpiocontrol), - ~0x0, 0x0); - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, gpioout), 0x40, - 0x40); - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, gpioouten), 0x40, - 0x40); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, gpiocontrol), + ~0x0, 0x0); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, gpioout), + 0x40, 0x40); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, gpioouten), + 0x40, 0x40); } else { mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2); - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, gpioout), 0x40, - 0x00); - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, gpioouten), 0x40, - 0x0); - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, gpiocontrol), - ~0x0, 0x40); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, gpioout), + 0x40, 0x00); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, gpioouten), + 0x40, 0x0); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, gpiocontrol), + ~0x0, 0x40); } } } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h index 96e15163222..e34a71e7d24 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h @@ -166,7 +166,6 @@ struct shared_phy_params { struct phy_shim_info *physhim; uint unit; uint corerev; - uint buscorerev; u16 vid; u16 did; uint chip; @@ -175,7 +174,6 @@ struct shared_phy_params { uint sromrev; uint boardtype; uint boardrev; - uint boardvendor; u32 boardflags; u32 boardflags2; }; @@ -183,7 +181,7 @@ struct shared_phy_params { extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp); extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, - struct d11regs __iomem *regs, + struct bcma_device *d11core, 
int bandtype, struct wiphy *wiphy); extern void wlc_phy_detach(struct brcms_phy_pub *ppi); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h index bea85241a24..af00e2c2b26 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h @@ -503,10 +503,8 @@ struct shared_phy { uint sromrev; uint boardtype; uint boardrev; - uint boardvendor; u32 boardflags; u32 boardflags2; - uint buscorerev; uint fast_timer; uint slow_timer; uint glacial_timer; @@ -559,7 +557,7 @@ struct brcms_phy { } u; bool user_txpwr_at_rfport; - struct d11regs __iomem *regs; + struct bcma_device *d11core; struct brcms_phy *next; struct brcms_phy_pub pubpi; @@ -774,11 +772,6 @@ struct brcms_phy { s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ]; u8 nphy_noise_index; - u8 nphy_txpid2g[PHY_CORE_NUM_2]; - u8 nphy_txpid5g[PHY_CORE_NUM_2]; - u8 nphy_txpid5gl[PHY_CORE_NUM_2]; - u8 nphy_txpid5gh[PHY_CORE_NUM_2]; - bool nphy_gain_boost; bool nphy_elna_gain_config; u16 old_bphy_test; @@ -1095,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, #define BRCMS_PHY_WAR_PR51571(pi) \ if (NREV_LT((pi)->pubpi.phy_rev, 3)) \ - (void)R_REG(&(pi)->regs->maccontrol) + (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype); extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index a63aa99d981..ce8562aa5db 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec) si_pmu_pllupd(pi->sh->sih); write_phy_reg(pi, 0x942, 0); wlc_lcnphy_txrx_spur_avoidance_mode(pi, false); - pi_lcn->lcnphy_spurmod = 0; + pi_lcn->lcnphy_spurmod = false; mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8); write_phy_reg(pi, 0x425, 0x5907); @@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec) write_phy_reg(pi, 0x942, 0); wlc_lcnphy_txrx_spur_avoidance_mode(pi, true); - pi_lcn->lcnphy_spurmod = 0; + pi_lcn->lcnphy_spurmod = false; mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8); write_phy_reg(pi, 0x425, 0x590a); @@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi) { s8 index, delta_brd, delta_temp, new_index, tempcorrx; s16 manp, meas_temp, temp_diff; - bool neg = 0; + bool neg = false; u16 temp; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; @@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi) manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense); temp_diff = manp - meas_temp; if (temp_diff < 0) { - neg = 1; + neg = true; temp_diff = -temp_diff; } @@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; idleTssi = read_phy_reg(pi, 0x4ab); - suspend = - (0 == - (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) & - MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); @@ -2890,7 +2888,8 @@ static void 
wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode) for (i = 0; i < 14; i++) values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]); - suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4); @@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) bool suspend; struct brcms_phy *pi = (struct brcms_phy *) ppi; - suspend = - (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh, timer = 0; old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); - curval1 = R_REG(&pi->regs->psm_corectlsts); + curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts)); ptr[130] = 0; - W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1)); + bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), + ((1 << 6) | curval1)); - W_REG(&pi->regs->smpl_clct_strptr, 0x7E00); - W_REG(&pi->regs->smpl_clct_stpptr, 0x8000); + bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00); + bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000); udelay(20); - curval2 = R_REG(&pi->regs->psm_phy_hdr_param); - W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30); + curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param)); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), + curval2 | 0x30); write_phy_reg(pi, 0x555, 0x0); write_phy_reg(pi, 0x5a6, 0x5); @@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh, sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008)); - stpptr = R_REG(&pi->regs->smpl_clct_stpptr); - curptr = R_REG(&pi->regs->smpl_clct_curptr); + stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr)); + curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr)); do { udelay(10); - curptr = R_REG(&pi->regs->smpl_clct_curptr); + curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr)); timer++; } while ((curptr != stpptr) && (timer < 500)); - W_REG(&pi->regs->psm_phy_hdr_param, 0x2); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2); strptr = 0x7E00; - W_REG(&pi->regs->tplatewrptr, strptr); + bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr); while (strptr < 0x8000) { - val = R_REG(&pi->regs->tplatewrdata); + val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata)); imag = ((val >> 16) & 0x3ff); real = ((val) & 0x3ff); if (imag > 511) @@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh, } write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); - W_REG(&pi->regs->psm_phy_hdr_param, curval2); - W_REG(&pi->regs->psm_corectlsts, curval1); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2); + bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1); } static void @@ -3681,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16); udelay(20); for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) { - phy_c23 = 1; - phy_c22 = 0; + phy_c23 = true; + phy_c22 = false; switch (cal_type) { case 0: phy_c10 = 511; @@ -3700,18 +3701,18 @@ 
wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, phy_c9 = read_phy_reg(pi, 0x93d); phy_c9 = 2 * phy_c9; - phy_c24 = 0; + phy_c24 = false; phy_c5 = 7; - phy_c25 = 1; + phy_c25 = true; while (1) { write_radio_reg(pi, RADIO_2064_REG026, (phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4)); udelay(50); - phy_c22 = 0; + phy_c22 = false; ptr[130] = 0; wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2); if (ptr[130] == 1) - phy_c22 = 1; + phy_c22 = true; if (phy_c22) phy_c5 -= 1; if ((phy_c22 != phy_c24) && (!phy_c25)) @@ -3721,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, if (phy_c5 <= 0 || phy_c5 >= 7) break; phy_c24 = phy_c22; - phy_c25 = 0; + phy_c25 = false; } if (phy_c5 < 0) @@ -3772,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, phy_c13 = phy_c11; phy_c14 = phy_c12; } - phy_c23 = 0; + phy_c23 = false; } } - phy_c23 = 1; + phy_c23 = true; phy_c15 = phy_c13; phy_c16 = phy_c14; phy_c7 = phy_c7 >> 1; @@ -3965,12 +3966,12 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode) { u16 tempsenseval1, tempsenseval2; s16 avg = 0; - bool suspend = 0; + bool suspend = false; if (mode == 1) { - suspend = - (0 == - (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, + D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE); @@ -4007,14 +4008,14 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode) { u16 tempsenseval1, tempsenseval2; s32 avg = 0; - bool suspend = 0; + bool suspend = false; u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; if (mode == 1) { - suspend = - (0 == - (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, + D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE); @@ -4075,12 +4076,12 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode) { u16 vbatsenseval; s32 avg = 0; - bool suspend = 0; + bool suspend = false; if (mode == 1) { - suspend = - (0 == - (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, + D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE); @@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi) s8 index; u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; - suspend = - (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_deaf_mode(pi, true); @@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi) pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec); index = pi_lcn->lcnphy_current_index; - suspend = - (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) { wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000); wlapi_suspend_mac_and_wait(pi->sh->physhim); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c index cd19c2f7a34..a16f1ab292f 100644 --- 
a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c @@ -29,6 +29,7 @@ #include "phy_radio.h" #include "phyreg_n.h" #include "phytbl_n.h" +#include "soc.h" #define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \ read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \ @@ -14417,12 +14418,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) switch (band_num) { case 0: - pi->nphy_txpid2g[PHY_CORE_0] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID2GA0); - pi->nphy_txpid2g[PHY_CORE_1] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID2GA1); pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP2GA0); @@ -14486,12 +14481,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) break; case 1: - pi->nphy_txpid5g[PHY_CORE_0] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID5GA0); - pi->nphy_txpid5g[PHY_CORE_1] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID5GA1); pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0); pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm = @@ -14551,12 +14540,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) break; case 2: - pi->nphy_txpid5gl[0] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID5GLA0); - pi->nphy_txpid5gl[1] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID5GLA1); pi->nphy_pwrctrl_info[0].max_pwr_5gl = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GLA0); @@ -14615,12 +14598,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) break; case 3: - pi->nphy_txpid5gh[0] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID5GHA0); - pi->nphy_txpid5gh[1] = - (u8) wlapi_getintvar(shim, - BRCMS_SROM_TXPID5GHA1); pi->nphy_pwrctrl_info[0].max_pwr_5gh = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GHA0); @@ -17825,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi) if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); - (void)R_REG(&pi->regs->maccontrol); + (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); udelay(1); } @@ -17976,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi) if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); - (void)R_REG(&pi->regs->maccontrol); + (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); udelay(1); } @@ -19470,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi) u8 tx_pwr_ctrl_state; bool do_nphy_cal = false; uint core; - uint origidx, intr_val; - struct d11regs __iomem *regs; u32 d11_clk_ctl_st; bool do_rssi_cal = false; @@ -19485,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi) (pi->sh->chippkg == BCM4718_PKG_ID))) { if ((pi->sh->boardflags & BFL_EXTLNA) && (CHSPEC_IS2G(pi->radio_chanspec))) - ai_corereg(pi->sh->sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol), - 0x40, 0x40); + ai_cc_reg(pi->sh->sih, + offsetof(struct chipcregs, chipcontrol), + 0x40, 0x40); } if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) && CHSPEC_IS40(pi->radio_chanspec)) { - regs = (struct d11regs __iomem *) - ai_switch_core(pi->sh->sih, - D11_CORE_ID, &origidx, - &intr_val); - d11_clk_ctl_st = R_REG(®s->clk_ctl_st); - AND_REG(®s->clk_ctl_st, - ~(CCS_FORCEHT | CCS_HTAREQ)); + d11_clk_ctl_st = bcma_read32(pi->d11core, + D11REGOFFS(clk_ctl_st)); + bcma_mask32(pi->d11core, 
D11REGOFFS(clk_ctl_st), + ~(CCS_FORCEHT | CCS_HTAREQ)); - W_REG(®s->clk_ctl_st, d11_clk_ctl_st); - - ai_restore_core(pi->sh->sih, origidx, intr_val); + bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st), + d11_clk_ctl_st); } pi->use_int_tx_iqlo_cal_nphy = @@ -19908,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask) if (!pi->sh->clk) return; - suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -21286,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec, val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand; if (CHSPEC_IS5G(chanspec) && !val) { - val = R_REG(&pi->regs->psm_phy_hdr_param); - W_REG(&pi->regs->psm_phy_hdr_param, + val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param)); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), (val | MAC_PHY_FORCE_CLK)); or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG), (BBCFG_RESETCCA | BBCFG_RESETRX)); - W_REG(&pi->regs->psm_phy_hdr_param, val); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val); or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand); } else if (!CHSPEC_IS5G(chanspec) && val) { and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand); - val = R_REG(&pi->regs->psm_phy_hdr_param); - W_REG(&pi->regs->psm_phy_hdr_param, + val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param)); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), (val | MAC_PHY_FORCE_CLK)); and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG), (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX))); - W_REG(&pi->regs->psm_phy_hdr_param, val); + bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val); } write_phy_reg(pi, 0x1ce, ci->PHY_BW1a); @@ -21365,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec, spuravoid = 1; wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false); - si_pmu_spuravoid(pi->sh->sih, spuravoid); + si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid); wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true); if ((pi->sh->chip == BCM43224_CHIP_ID) || (pi->sh->chip == BCM43225_CHIP_ID)) { - if (spuravoid == 1) { - - W_REG(&pi->regs->tsf_clk_frac_l, - 0x5341); - W_REG(&pi->regs->tsf_clk_frac_h, - 0x8); + bcma_write16(pi->d11core, + D11REGOFFS(tsf_clk_frac_l), + 0x5341); + bcma_write16(pi->d11core, + D11REGOFFS(tsf_clk_frac_h), 0x8); } else { - - W_REG(&pi->regs->tsf_clk_frac_l, - 0x8889); - W_REG(&pi->regs->tsf_clk_frac_h, - 0x8); + bcma_write16(pi->d11core, + D11REGOFFS(tsf_clk_frac_l), + 0x8889); + bcma_write16(pi->d11core, + D11REGOFFS(tsf_clk_frac_h), 0x8); } } @@ -21522,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init) ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY); - mc = R_REG(&pi->regs->maccontrol); + mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); mc &= ~MCTL_GPOUT_SEL_MASK; - W_REG(&pi->regs->maccontrol, mc); + bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc); - OR_REG(&pi->regs->psm_gpio_oe, mask); + bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask); - AND_REG(&pi->regs->psm_gpio_out, ~mask); + bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask); if (lut_init) { write_phy_reg(pi, 0xf8, 0x02d8); @@ -21545,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val) bool suspended = false; if (D11REV_IS(pi->sh->corerev, 16)) { - suspended = - (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ? 
- false : true; + suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC) ? false : true; if (!suspended) wlapi_suspend_mac_and_wait(pi->sh->physhim); } @@ -25406,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal) if (pi->nphy_papd_skip == 1) return; - phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); + phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & + MCTL_EN_MAC)); if (!phy_b3) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -27994,20 +27965,11 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi) chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0); switch (chan_freq_range) { case WL_CHAN_FREQ_RANGE_2G: - txpi[0] = pi->nphy_txpid2g[0]; - txpi[1] = pi->nphy_txpid2g[1]; - break; case WL_CHAN_FREQ_RANGE_5GL: - txpi[0] = pi->nphy_txpid5gl[0]; - txpi[1] = pi->nphy_txpid5gl[1]; - break; case WL_CHAN_FREQ_RANGE_5GM: - txpi[0] = pi->nphy_txpid5g[0]; - txpi[1] = pi->nphy_txpid5g[1]; - break; case WL_CHAN_FREQ_RANGE_5GH: - txpi[0] = pi->nphy_txpid5gh[0]; - txpi[1] = pi->nphy_txpid5gh[1]; + txpi[0] = 0; + txpi[1] = 0; break; default: txpi[0] = txpi[1] = 91; @@ -28389,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi) if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); - (void)R_REG(&pi->regs->maccontrol); + (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); udelay(1); } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c index 3b36e3acfd7..4931d29d077 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c @@ -23,6 +23,7 @@ #include "pub.h" #include "aiutils.h" #include "pmu.h" +#include "soc.h" /* * external LPO crystal frequency @@ -114,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax) uint rsrcs; /* # resources */ - rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT; + rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT; /* determine min/max rsrc masks */ - switch (sih->chip) { + switch (ai_get_chip_id(sih)) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: /* ??? 
*/ @@ -138,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax) *pmax = max_mask; } -static void -si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc, - u8 spuravoid) +void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid) { u32 tmp = 0; + struct bcma_device *core; - switch (sih->chip) { + /* switch to chipc */ + core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + + switch (ai_get_chip_id(sih)) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: if (spuravoid == 1) { - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); - W_REG(&cc->pllcontrol_data, 0x11500010); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); - W_REG(&cc->pllcontrol_data, 0x000C0C06); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); - W_REG(&cc->pllcontrol_data, 0x0F600a08); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); - W_REG(&cc->pllcontrol_data, 0x00000000); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); - W_REG(&cc->pllcontrol_data, 0x2001E920); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); - W_REG(&cc->pllcontrol_data, 0x88888815); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL0); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x11500010); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL1); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x000C0C06); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL2); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x0F600a08); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL3); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x00000000); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL4); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x2001E920); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL5); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x88888815); } else { - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); - W_REG(&cc->pllcontrol_data, 0x11100010); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); - W_REG(&cc->pllcontrol_data, 0x000c0c06); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); - W_REG(&cc->pllcontrol_data, 0x03000a08); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); - W_REG(&cc->pllcontrol_data, 0x00000000); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); - W_REG(&cc->pllcontrol_data, 0x200005c0); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); - W_REG(&cc->pllcontrol_data, 0x88888815); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL0); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x11100010); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL1); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x000c0c06); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL2); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x03000a08); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL3); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x00000000); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL4); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x200005c0); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), + PMU1_PLL0_PLLCTL5); + bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), + 0x88888815); } tmp = 1 << 10; break; - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); - W_REG(&cc->pllcontrol_data, 0x11100008); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); - W_REG(&cc->pllcontrol_data, 
0x0c000c06); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); - W_REG(&cc->pllcontrol_data, 0x03000a08); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); - W_REG(&cc->pllcontrol_data, 0x00000000); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); - W_REG(&cc->pllcontrol_data, 0x200005c0); - W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); - W_REG(&cc->pllcontrol_data, 0x88888855); - - tmp = 1 << 10; - break; - default: /* bail out */ return; } - tmp |= R_REG(&cc->pmucontrol); - W_REG(&cc->pmucontrol, tmp); + bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp); } u16 si_pmu_fast_pwrup_delay(struct si_pub *sih) { uint delay = PMU_MAX_TRANSITION_DLY; - switch (sih->chip) { + switch (ai_get_chip_id(sih)) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: case BCM4313_CHIP_ID: @@ -219,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih) return (u16) delay; } -void si_pmu_sprom_enable(struct si_pub *sih, bool enable) -{ - struct chipcregs __iomem *cc; - uint origidx; - - /* Remember original core before switch to chipc */ - origidx = ai_coreidx(sih); - cc = ai_setcoreidx(sih, SI_CC_IDX); - - /* Return to original core */ - ai_setcoreidx(sih, origidx); -} - /* Read/write a chipcontrol reg */ u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) { - ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr), - ~0, reg); - return ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, chipcontrol_data), mask, - val); + ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg); + return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data), + mask, val); } /* Read/write a regcontrol reg */ u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) { - ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr), - ~0, reg); - return ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, regcontrol_data), mask, - val); + ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg); + return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data), + mask, val); } /* Read/write a pllcontrol reg */ u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) { - ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr), - ~0, reg); - return ai_corereg(sih, SI_CC_IDX, - offsetof(struct chipcregs, pllcontrol_data), mask, - val); + ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg); + return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data), + mask, val); } /* PMU PLL update */ void si_pmu_pllupd(struct si_pub *sih) { - ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol), - PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD); + ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol), + PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD); } /* query alp/xtal clock frequency */ @@ -275,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih) u32 clock = ALP_CLOCK; /* bail out with default */ - if (!(sih->cccaps & CC_CAP_PMU)) + if (!(ai_get_cccaps(sih) & CC_CAP_PMU)) return clock; - switch (sih->chip) { + switch (ai_get_chip_id(sih)) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: case BCM4313_CHIP_ID: @@ -292,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih) return clock; } -void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid) -{ - struct chipcregs __iomem *cc; - uint origidx, intr_val; - - /* Remember original core before switch to chipc */ - cc = (struct chipcregs __iomem *) - ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); - - /* update the pll changes */ - 
si_pmu_spuravoid_pllupdate(sih, cc, spuravoid); - - /* Return to original core */ - ai_restore_core(sih, origidx, intr_val); -} - /* initialize PMU */ void si_pmu_init(struct si_pub *sih) { - struct chipcregs __iomem *cc; - uint origidx; + struct bcma_device *core; - /* Remember original core before switch to chipc */ - origidx = ai_coreidx(sih); - cc = ai_setcoreidx(sih, SI_CC_IDX); - - if (sih->pmurev == 1) - AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT); - else if (sih->pmurev >= 2) - OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT); + /* select chipc */ + core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - /* Return to original core */ - ai_setcoreidx(sih, origidx); -} - -/* initialize PMU chip controls and other chip level stuff */ -void si_pmu_chip_init(struct si_pub *sih) -{ - uint origidx; - - /* Gate off SPROM clock and chip select signals */ - si_pmu_sprom_enable(sih, false); - - /* Remember original core */ - origidx = ai_coreidx(sih); - - /* Return to original core */ - ai_setcoreidx(sih, origidx); -} - -/* initialize PMU switch/regulators */ -void si_pmu_swreg_init(struct si_pub *sih) -{ -} - -/* initialize PLL */ -void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq) -{ - struct chipcregs __iomem *cc; - uint origidx; - - /* Remember original core before switch to chipc */ - origidx = ai_coreidx(sih); - cc = ai_setcoreidx(sih, SI_CC_IDX); - - switch (sih->chip) { - case BCM4313_CHIP_ID: - case BCM43224_CHIP_ID: - case BCM43225_CHIP_ID: - /* ??? */ - break; - default: - break; - } - - /* Return to original core */ - ai_setcoreidx(sih, origidx); + if (ai_get_pmurev(sih) == 1) + bcma_mask32(core, CHIPCREGOFFS(pmucontrol), + ~PCTL_NOILP_ON_WAIT); + else if (ai_get_pmurev(sih) >= 2) + bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT); } /* initialize PMU resources */ void si_pmu_res_init(struct si_pub *sih) { - struct chipcregs __iomem *cc; - uint origidx; + struct bcma_device *core; u32 min_mask = 0, max_mask = 0; - /* Remember original core before switch to chipc */ - origidx = ai_coreidx(sih); - cc = ai_setcoreidx(sih, SI_CC_IDX); + /* select to chipc */ + core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); /* Determine min/max rsrc masks */ si_pmu_res_masks(sih, &min_mask, &max_mask); @@ -390,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih) /* Program max resource mask */ if (max_mask) - W_REG(&cc->max_res_mask, max_mask); + bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask); /* Program min resource mask */ if (min_mask) - W_REG(&cc->min_res_mask, min_mask); + bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask); /* Add some delay; allow resources to come up and settle. 
*/ mdelay(2); - - /* Return to original core */ - ai_setcoreidx(sih, origidx); } u32 si_pmu_measure_alpclk(struct si_pub *sih) { - struct chipcregs __iomem *cc; - uint origidx; + struct bcma_device *core; u32 alp_khz; - if (sih->pmurev < 10) + if (ai_get_pmurev(sih) < 10) return 0; /* Remember original core before switch to chipc */ - origidx = ai_coreidx(sih); - cc = ai_setcoreidx(sih, SI_CC_IDX); + core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) { + if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) { u32 ilp_ctr, alp_hz; /* * Enable the reg to measure the freq, * in case it was disabled before */ - W_REG(&cc->pmu_xtalfreq, - 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT); + bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), + 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT); /* Delay for well over 4 ILP clocks */ udelay(1000); /* Read the latched number of ALP ticks per 4 ILP ticks */ - ilp_ctr = - R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK; + ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) & + PMU_XTALFREQ_REG_ILPCTR_MASK; /* * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT * bit to save power */ - W_REG(&cc->pmu_xtalfreq, 0); + bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0); /* Calculate ALP frequency */ alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4; @@ -451,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih) } else alp_khz = 0; - /* Return to original core */ - ai_setcoreidx(sih, origidx); - return alp_khz; } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h index 3a08c620640..3e39c5e0f9f 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h @@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); extern u32 si_pmu_alp_clock(struct si_pub *sih); extern void si_pmu_pllupd(struct si_pub *sih); -extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid); +extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid); extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); extern void si_pmu_init(struct si_pub *sih); -extern void si_pmu_chip_init(struct si_pub *sih); -extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq); extern void si_pmu_res_init(struct si_pub *sih); -extern void si_pmu_swreg_init(struct si_pub *sih); extern u32 si_pmu_measure_alpclk(struct si_pub *sih); #endif /* _BRCM_PMU_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h index 37bb2dcc113..f0038ad7d7b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h @@ -17,6 +17,7 @@ #ifndef _BRCM_PUB_H_ #define _BRCM_PUB_H_ +#include <linux/bcma/bcma.h> #include <brcmu_wifi.h> #include "types.h" #include "defs.h" @@ -170,22 +171,6 @@ enum brcms_srom_id { BRCMS_SROM_TSSIPOS2G, BRCMS_SROM_TSSIPOS5G, BRCMS_SROM_TXCHAIN, - BRCMS_SROM_TXPID2GA0, - BRCMS_SROM_TXPID2GA1, - BRCMS_SROM_TXPID2GA2, - BRCMS_SROM_TXPID2GA3, - BRCMS_SROM_TXPID5GA0, - BRCMS_SROM_TXPID5GA1, - BRCMS_SROM_TXPID5GA2, - BRCMS_SROM_TXPID5GA3, - BRCMS_SROM_TXPID5GHA0, - BRCMS_SROM_TXPID5GHA1, - BRCMS_SROM_TXPID5GHA2, - BRCMS_SROM_TXPID5GHA3, - BRCMS_SROM_TXPID5GLA0, - BRCMS_SROM_TXPID5GLA1, - BRCMS_SROM_TXPID5GLA2, - BRCMS_SROM_TXPID5GLA3, /* * per-path identifiers (see srom.c) */ @@ -225,10 +210,6 @@ enum 
brcms_srom_id { BRCMS_SROM_PA2GW2A1, BRCMS_SROM_PA2GW2A2, BRCMS_SROM_PA2GW2A3, - BRCMS_SROM_PA2GW3A0, - BRCMS_SROM_PA2GW3A1, - BRCMS_SROM_PA2GW3A2, - BRCMS_SROM_PA2GW3A3, BRCMS_SROM_PA5GHW0A0, BRCMS_SROM_PA5GHW0A1, BRCMS_SROM_PA5GHW0A2, @@ -241,10 +222,6 @@ enum brcms_srom_id { BRCMS_SROM_PA5GHW2A1, BRCMS_SROM_PA5GHW2A2, BRCMS_SROM_PA5GHW2A3, - BRCMS_SROM_PA5GHW3A0, - BRCMS_SROM_PA5GHW3A1, - BRCMS_SROM_PA5GHW3A2, - BRCMS_SROM_PA5GHW3A3, BRCMS_SROM_PA5GLW0A0, BRCMS_SROM_PA5GLW0A1, BRCMS_SROM_PA5GLW0A2, @@ -257,10 +234,6 @@ enum brcms_srom_id { BRCMS_SROM_PA5GLW2A1, BRCMS_SROM_PA5GLW2A2, BRCMS_SROM_PA5GLW2A3, - BRCMS_SROM_PA5GLW3A0, - BRCMS_SROM_PA5GLW3A1, - BRCMS_SROM_PA5GLW3A2, - BRCMS_SROM_PA5GLW3A3, BRCMS_SROM_PA5GW0A0, BRCMS_SROM_PA5GW0A1, BRCMS_SROM_PA5GW0A2, @@ -273,14 +246,9 @@ enum brcms_srom_id { BRCMS_SROM_PA5GW2A1, BRCMS_SROM_PA5GW2A2, BRCMS_SROM_PA5GW2A3, - BRCMS_SROM_PA5GW3A0, - BRCMS_SROM_PA5GW3A1, - BRCMS_SROM_PA5GW3A2, - BRCMS_SROM_PA5GW3A3, }; #define BRCMS_NUMRATES 16 /* max # of rates in a rateset */ -#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */ /* phy types */ #define PHY_TYPE_A 0 /* Phy type A */ @@ -414,7 +382,6 @@ struct brcms_pub { uint _nbands; /* # bands supported */ uint now; /* # elapsed seconds */ - bool promisc; /* promiscuous destination address */ bool delayed_down; /* down delayed */ bool associated; /* true:part of [I]BSS, false: not */ /* (union of stas_associated, aps_associated) */ @@ -564,15 +531,14 @@ struct brcms_antselcfg { /* common functions for every port */ extern struct brcms_c_info * -brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, - bool piomode, void __iomem *regsva, struct pci_dev *btparam, - uint *perr); +brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, + bool piomode, uint *perr); extern uint brcms_c_detach(struct brcms_c_info *wlc); extern int brcms_c_up(struct brcms_c_info *wlc); extern uint brcms_c_down(struct brcms_c_info *wlc); extern bool brcms_c_chipmatch(u16 vendor, u16 device); -extern void brcms_c_init(struct brcms_c_info *wlc); +extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx); extern void brcms_c_reset(struct brcms_c_info *wlc); extern void brcms_c_intrson(struct brcms_c_info *wlc); @@ -628,7 +594,7 @@ extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval); extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr); extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); -extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc); extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); +extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); #endif /* _BRCM_PUB_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h index e7b9dc2f273..980d578825c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h @@ -19,6 +19,7 @@ #include "types.h" #include "d11.h" +#include "phy_hal.h" extern const u8 rate_info[]; extern const struct brcms_c_rateset cck_ofdm_mimo_rates; @@ -198,11 +199,9 @@ static inline u8 cck_rspec(u8 cck) /* Convert encoded rate value in plcp header to numerical rates in 500 KHz * increments */ -extern const u8 ofdm_rate_lookup[]; - static inline u8 ofdm_phy2mac_rate(u8 rlpt) { - return ofdm_rate_lookup[rlpt & 0x7]; + return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7]; } static inline u8 cck_phy2mac_rate(u8 signal) diff --git 
a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c index 99f791048e8..61092156755 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c @@ -28,6 +28,7 @@ #include "aiutils.h" #include "otp.h" #include "srom.h" +#include "soc.h" /* * SROM CRC8 polynomial value: @@ -62,9 +63,6 @@ #define SROM_MACHI_ET1 42 #define SROM_MACMID_ET1 43 #define SROM_MACLO_ET1 44 -#define SROM3_MACHI 37 -#define SROM3_MACMID 38 -#define SROM3_MACLO 39 #define SROM_BXARSSI2G 40 #define SROM_BXARSSI5G 41 @@ -101,7 +99,6 @@ #define SROM_BFL 57 #define SROM_BFL2 28 -#define SROM3_BFL2 61 #define SROM_AG10 58 @@ -109,99 +106,16 @@ #define SROM_OPO 60 -#define SROM3_LEDDC 62 - #define SROM_CRCREV 63 -/* SROM Rev 4: Reallocate the software part of the srom to accommodate - * MIMO features. It assumes up to two PCIE functions and 440 bytes - * of usable srom i.e. the usable storage in chips with OTP that - * implements hardware redundancy. - */ - #define SROM4_WORDS 220 -#define SROM4_SIGN 32 -#define SROM4_SIGNATURE 0x5372 - -#define SROM4_BREV 33 - -#define SROM4_BFL0 34 -#define SROM4_BFL1 35 -#define SROM4_BFL2 36 -#define SROM4_BFL3 37 -#define SROM5_BFL0 37 -#define SROM5_BFL1 38 -#define SROM5_BFL2 39 -#define SROM5_BFL3 40 - -#define SROM4_MACHI 38 -#define SROM4_MACMID 39 -#define SROM4_MACLO 40 -#define SROM5_MACHI 41 -#define SROM5_MACMID 42 -#define SROM5_MACLO 43 - -#define SROM4_CCODE 41 -#define SROM4_REGREV 42 -#define SROM5_CCODE 34 -#define SROM5_REGREV 35 - -#define SROM4_LEDBH10 43 -#define SROM4_LEDBH32 44 -#define SROM5_LEDBH10 59 -#define SROM5_LEDBH32 60 - -#define SROM4_LEDDC 45 -#define SROM5_LEDDC 45 - -#define SROM4_AA 46 - -#define SROM4_AG10 47 -#define SROM4_AG32 48 - -#define SROM4_TXPID2G 49 -#define SROM4_TXPID5G 51 -#define SROM4_TXPID5GL 53 -#define SROM4_TXPID5GH 55 - -#define SROM4_TXRXC 61 #define SROM4_TXCHAIN_MASK 0x000f -#define SROM4_TXCHAIN_SHIFT 0 #define SROM4_RXCHAIN_MASK 0x00f0 -#define SROM4_RXCHAIN_SHIFT 4 #define SROM4_SWITCH_MASK 0xff00 -#define SROM4_SWITCH_SHIFT 8 /* Per-path fields */ #define MAX_PATH_SROM 4 -#define SROM4_PATH0 64 -#define SROM4_PATH1 87 -#define SROM4_PATH2 110 -#define SROM4_PATH3 133 - -#define SROM4_2G_ITT_MAXP 0 -#define SROM4_2G_PA 1 -#define SROM4_5G_ITT_MAXP 5 -#define SROM4_5GLH_MAXP 6 -#define SROM4_5G_PA 7 -#define SROM4_5GL_PA 11 -#define SROM4_5GH_PA 15 - -/* All the miriad power offsets */ -#define SROM4_2G_CCKPO 156 -#define SROM4_2G_OFDMPO 157 -#define SROM4_5G_OFDMPO 159 -#define SROM4_5GL_OFDMPO 161 -#define SROM4_5GH_OFDMPO 163 -#define SROM4_2G_MCSPO 165 -#define SROM4_5G_MCSPO 173 -#define SROM4_5GL_MCSPO 181 -#define SROM4_5GH_MCSPO 189 -#define SROM4_CDDPO 197 -#define SROM4_STBCPO 198 -#define SROM4_BW40PO 199 -#define SROM4_BWDUPPO 200 #define SROM4_CRCREV 219 @@ -424,103 +338,32 @@ struct brcms_varbuf { static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID, 0xffff}, - {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV, - SROM_BR_MASK}, - {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff}, {BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff}, - {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff}, - {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL, - 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff}, - {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL, - 0xffff}, - 
{BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff}, - {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0, - 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff}, - {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0, - 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff}, {BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff}, - {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2, - 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff}, - {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2, - 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff}, {BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff}, {BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff}, - {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff}, - {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff}, - {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff}, - {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff}, {BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff}, - {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, - {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00}, - {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff}, - {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff}, {BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff}, - {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, - {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, - {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, - {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00}, - {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff}, - {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00}, - {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff}, - {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00}, - {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff}, - {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00}, - {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff}, - {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00}, {BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff}, {BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00}, {BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff}, {BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00}, - {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, - {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, - {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff}, - {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff}, - {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, {BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, {BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, {BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, {BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00}, {BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff}, - {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff}, {BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff}, - {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, - {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff}, {BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff}, - 
{BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, - {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00}, {BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00}, - {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff}, - {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00}, - {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff}, - {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00}, - {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff}, - {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00}, {BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff}, {BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00}, {BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff}, {BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00}, - {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, - {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, - {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, - {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, - {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, - {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, - {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, - {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, - {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, - {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00}, - {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, - {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, - {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, {BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, {BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, {BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, @@ -534,40 +377,20 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff}, {BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00}, {BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, - {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, - {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, - {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, - {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, {BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800}, {BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700}, {BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0}, {BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f}, - {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, - {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, - {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, - {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, {BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800}, {BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700}, {BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0}, {BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f}, - {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff}, - {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00}, - {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, - {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00}, {BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff}, {BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00}, {BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff}, {BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 
0xff00}, - {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, - {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, {BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, {BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, - {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, - SROM4_TXCHAIN_MASK}, - {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, - SROM4_RXCHAIN_MASK}, - {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, - SROM4_SWITCH_MASK}, {BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, {BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, @@ -594,43 +417,11 @@ static const struct brcms_sromvar pci_sromvars[] = { SROM8_FEM_ANTSWLUT_MASK}, {BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00}, {BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff}, - {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, - {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, - {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, - {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, - {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, - {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, - {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, - {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, - {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, - {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, - {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, - {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, - {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, - {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, - {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, - {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, - - {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, - {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, - {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, + {BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff}, {BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff}, - {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, - {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, - {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff}, - {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, - 0xffff}, - {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, - 0xffff}, {BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC, 0xffff}, - {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC, - 0xffff}, - {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC, - 0xffff}, - {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC, - 0xffff}, {BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, {BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS, @@ -650,16 +441,7 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, - {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, {BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, - {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff}, - 
{BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, - {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, - {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, - {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, {BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, {BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, @@ -668,38 +450,6 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, {BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, - {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, - {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, - {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, - {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, - {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, - {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, - {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, - {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, - {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, - {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, - {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, - {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, - {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, - {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, - {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, - {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, - {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, - {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, - {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, - {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, - {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, - {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, - {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, - {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, - {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, - {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, - {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, - {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, - {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, - {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, - {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, - {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, {BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, {BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, {BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, @@ -732,10 +482,6 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, {BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, {BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, - {BRCMS_SROM_CDDPO, 0x000000f0, 0, 
SROM4_CDDPO, 0xffff}, - {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff}, - {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff}, - {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, {BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff}, {BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff}, {BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff}, @@ -811,34 +557,6 @@ static const struct brcms_sromvar pci_sromvars[] = { }; static const struct brcms_sromvar perpath_pci_sromvars[] = { - {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, - {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00}, - {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, - {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, - {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, - {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, - {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, - {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, - {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, - {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, - {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, - {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, - {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, - {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, - {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, - {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, - 0xffff}, - {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, - 0xffff}, - {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, - 0xffff}, - {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, - {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, - 0xffff}, - {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, - 0xffff}, - {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, - 0xffff}, {BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff}, {BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00}, {BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00}, @@ -868,24 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = { * shared between devices. */ static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE]; -static u16 __iomem * -srom_window_address(struct si_pub *sih, u8 __iomem *curmap) -{ - if (sih->ccrev < 32) - return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET); - if (sih->cccaps & CC_CAP_SROM) - return (u16 __iomem *) - (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP); - - return NULL; -} - -/* Parse SROM and create name=value pairs. 'srom' points to - * the SROM word array. 'off' specifies the offset of the - * first word 'srom' points to, which should be either 0 or - * SROM3_SWRG_OFF (full SROM or software region). 
- */ - static uint mask_shift(u16 mask) { uint i; @@ -906,18 +606,16 @@ static uint mask_width(u16 mask) return 0; } -static inline void ltoh16_buf(u16 *buf, unsigned int size) +static inline void le16_to_cpu_buf(u16 *buf, uint nwords) { - size /= 2; - while (size--) - *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size)); + while (nwords--) + *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords)); } -static inline void htol16_buf(u16 *buf, unsigned int size) +static inline void cpu_to_le16_buf(u16 *buf, uint nwords) { - size /= 2; - while (size--) - *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size)); + while (nwords--) + *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords)); } /* @@ -929,11 +627,14 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list) struct brcms_srom_list_head *entry; enum brcms_srom_id id; u16 w; - u32 val; + u32 val = 0; const struct brcms_sromvar *srv; uint width; uint flags; u32 sr = (1 << sromrev); + uint p; + uint pb = SROM8_PATH0; + const uint psz = SROM8_PATH1 - SROM8_PATH0; /* first store the srom revision */ entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL); @@ -1031,47 +732,34 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list) list_add(&entry->var_list, var_list); } - if (sromrev >= 4) { - /* Do per-path variables */ - uint p, pb, psz; - - if (sromrev >= 8) { - pb = SROM8_PATH0; - psz = SROM8_PATH1 - SROM8_PATH0; - } else { - pb = SROM4_PATH0; - psz = SROM4_PATH1 - SROM4_PATH0; - } - - for (p = 0; p < MAX_PATH_SROM; p++) { - for (srv = perpath_pci_sromvars; - srv->varid != BRCMS_SROM_NULL; srv++) { - if ((srv->revmask & sr) == 0) - continue; + for (p = 0; p < MAX_PATH_SROM; p++) { + for (srv = perpath_pci_sromvars; + srv->varid != BRCMS_SROM_NULL; srv++) { + if ((srv->revmask & sr) == 0) + continue; - if (srv->flags & SRFL_NOVAR) - continue; + if (srv->flags & SRFL_NOVAR) + continue; - w = srom[pb + srv->off]; - val = (w & srv->mask) >> mask_shift(srv->mask); - width = mask_width(srv->mask); + w = srom[pb + srv->off]; + val = (w & srv->mask) >> mask_shift(srv->mask); + width = mask_width(srv->mask); - /* Cheating: no per-path var is more than - * 1 word */ - if ((srv->flags & SRFL_NOFFS) - && ((int)val == (1 << width) - 1)) - continue; + /* Cheating: no per-path var is more than + * 1 word */ + if ((srv->flags & SRFL_NOFFS) + && ((int)val == (1 << width) - 1)) + continue; - entry = - kzalloc(sizeof(struct brcms_srom_list_head), - GFP_KERNEL); - entry->varid = srv->varid+p; - entry->var_type = BRCMS_SROM_UNUMBER; - entry->uval = val; - list_add(&entry->var_list, var_list); - } - pb += psz; + entry = + kzalloc(sizeof(struct brcms_srom_list_head), + GFP_KERNEL); + entry->varid = srv->varid+p; + entry->var_type = BRCMS_SROM_UNUMBER; + entry->uval = val; + list_add(&entry->var_list, var_list); } + pb += psz; } } @@ -1080,41 +768,48 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list) * Return 0 on success, nonzero on error. 
*/ static int -sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff, - u16 *buf, uint nwords, bool check_crc) +sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc) { int err = 0; uint i; + u8 *bbuf = (u8 *)buf; /* byte buffer */ + uint nbytes = nwords << 1; + struct bcma_device *core; + uint sprom_offset; + + /* determine core to read */ + if (ai_get_ccrev(sih) < 32) { + core = ai_findcore(sih, BCMA_CORE_80211, 0); + sprom_offset = PCI_BAR0_SPROM_OFFSET; + } else { + core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + sprom_offset = CHIPCREGOFFS(sromotp); + } - /* read the sprom */ - for (i = 0; i < nwords; i++) - buf[i] = R_REG(&sprom[wordoff + i]); - - if (check_crc) { - - if (buf[0] == 0xffff) - /* - * The hardware thinks that an srom that starts with - * 0xffff is blank, regardless of the rest of the - * content, so declare it bad. - */ - return -ENODATA; + /* read the sprom in bytes */ + for (i = 0; i < nbytes; i++) + bbuf[i] = bcma_read8(core, sprom_offset+i); - /* fixup the endianness so crc8 will pass */ - htol16_buf(buf, nwords * 2); - if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2, - CRC8_INIT_VALUE) != - CRC8_GOOD_VALUE(brcms_srom_crc8_table)) - /* DBG only pci always read srom4 first, then srom8/9 */ - err = -EIO; + if (buf[0] == 0xffff) + /* + * The hardware thinks that an srom that starts with + * 0xffff is blank, regardless of the rest of the + * content, so declare it bad. + */ + return -ENODATA; + if (check_crc && + crc8(brcms_srom_crc8_table, bbuf, nbytes, CRC8_INIT_VALUE) != + CRC8_GOOD_VALUE(brcms_srom_crc8_table)) + err = -EIO; + else /* now correct the endianness of the byte array */ - ltoh16_buf(buf, nwords * 2); - } + le16_to_cpu_buf(buf, nwords); + return err; } -static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz) +static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords) { u8 *otp; uint sz = OTP_SZ_MAX / 2; /* size in words */ @@ -1126,7 +821,8 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz) err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz); - memcpy(buf, otp, bufsz); + sz = min_t(uint, sz, nwords); + memcpy(buf, otp, sz * 2); kfree(otp); @@ -1139,13 +835,13 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz) return -ENODATA; /* fixup the endianness so crc8 will pass */ - htol16_buf(buf, bufsz); - if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2, + cpu_to_le16_buf(buf, sz); + if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table)) err = -EIO; - - /* now correct the endianness of the byte array */ - ltoh16_buf(buf, bufsz); + else + /* now correct the endianness of the byte array */ + le16_to_cpu_buf(buf, sz); return err; } @@ -1154,10 +850,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz) * Initialize nonvolatile variable table from sprom. * Return 0 on success, nonzero on error. 
*/ -static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap) +int srom_var_init(struct si_pub *sih) { u16 *srom; - u16 __iomem *sromwindow; u8 sromrev = 0; u32 sr; int err = 0; @@ -1169,33 +864,17 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap) if (!srom) return -ENOMEM; - sromwindow = srom_window_address(sih, curmap); - crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY); if (ai_is_sprom_available(sih)) { - err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS, - true); - - if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) || - (((sih->buscoretype == PCIE_CORE_ID) - && (sih->buscorerev >= 6)) - || ((sih->buscoretype == PCI_CORE_ID) - && (sih->buscorerev >= 0xe)))) { - /* sromrev >= 4, read more */ - err = sprom_read_pci(sih, sromwindow, 0, srom, - SROM4_WORDS, true); - sromrev = srom[SROM4_CRCREV] & 0xff; - } else if (err == 0) { - /* srom is good and is rev < 4 */ + err = sprom_read_pci(sih, srom, SROM4_WORDS, true); + + if (err == 0) + /* srom read and passed crc */ /* top word of sprom contains version and crc8 */ - sromrev = srom[SROM_CRCREV] & 0xff; - /* bcm4401 sroms misprogrammed */ - if (sromrev == 0x10) - sromrev = 1; - } + sromrev = srom[SROM4_CRCREV] & 0xff; } else { /* Use OTP if SPROM not available */ - err = otp_read_pci(sih, srom, SROM_MAX); + err = otp_read_pci(sih, srom, SROM4_WORDS); if (err == 0) /* OTP only contain SROM rev8/rev9 for now */ sromrev = srom[SROM4_CRCREV] & 0xff; @@ -1208,10 +887,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap) sr = 1 << sromrev; /* - * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8, - * 9 + * srom version check: Current valid versions: 8, 9 */ - if ((sr & 0x33e) == 0) { + if ((sr & 0x300) == 0) { err = -EINVAL; goto errout; } @@ -1238,21 +916,6 @@ void srom_free_vars(struct si_pub *sih) kfree(entry); } } -/* - * Initialize local vars from the right source for this platform. - * Return 0 on success, nonzero on error. - */ -int srom_var_init(struct si_pub *sih, void __iomem *curmap) -{ - uint len; - - len = 0; - - if (curmap != NULL) - return initvars_srom_pci(sih, curmap); - - return -EINVAL; -} /* * Search the name=value vars for a specific one and return its value. diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h index 708c43ff51c..f2a58f262c9 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h @@ -20,15 +20,10 @@ #include "types.h" /* Prototypes */ -extern int srom_var_init(struct si_pub *sih, void __iomem *curmap); +extern int srom_var_init(struct si_pub *sih); extern void srom_free_vars(struct si_pub *sih); extern int srom_read(struct si_pub *sih, uint bus, void *curmap, uint byteoff, uint nbytes, u16 *buf, bool check_crc); -/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP - * and extract from it into name=value pairs - */ -extern int srom_parsecis(u8 **pcis, uint ciscnt, - char **vars, uint *count); #endif /* _BRCM_SROM_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h index 27a814b0746..e11ae83111e 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/types.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h @@ -250,66 +250,18 @@ do { \ wiphy_err(dev, "%s: " fmt, __func__, ##args); \ } while (0) -/* - * Register access macros. - * - * These macro's take a pointer to the address to read as one of their - * arguments. 
The macro itself deduces the size of the IO transaction (u8, u16 - * or u32). Advantage of this approach in combination with using a struct to - * define the registers in a register block, is that access size and access - * location are defined in only one spot. This reduces the risk of the - * programmer trying to use an unsupported transaction size on a register. - * - */ - -#define R_REG(r) \ - ({ \ - __typeof(*(r)) __osl_v; \ - switch (sizeof(*(r))) { \ - case sizeof(u8): \ - __osl_v = readb((u8 __iomem *)(r)); \ - break; \ - case sizeof(u16): \ - __osl_v = readw((u16 __iomem *)(r)); \ - break; \ - case sizeof(u32): \ - __osl_v = readl((u32 __iomem *)(r)); \ - break; \ - } \ - __osl_v; \ - }) - -#define W_REG(r, v) do { \ - switch (sizeof(*(r))) { \ - case sizeof(u8): \ - writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \ - break; \ - case sizeof(u16): \ - writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \ - break; \ - case sizeof(u32): \ - writel((u32)(v), (u32 __iomem *)(r)); \ - break; \ - } \ - } while (0) - #ifdef CONFIG_BCM47XX /* * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder * transactions. As a fix, a read after write is performed on certain places * in the code. Older chips and the newer 5357 family don't require this fix. */ -#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); }) +#define bcma_wflush16(c, o, v) \ + ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); }) #else -#define W_REG_FLUSH(r, v) W_REG((r), (v)) +#define bcma_wflush16(c, o, v) bcma_write16(c, o, v) #endif /* CONFIG_BCM47XX */ -#define AND_REG(r, v) W_REG((r), R_REG(r) & (v)) -#define OR_REG(r, v) W_REG((r), R_REG(r) | (v)) - -#define SET_REG(r, mask, val) \ - W_REG((r), ((R_REG(r) & ~(mask)) | (val))) - /* multi-bool data type: set of bools, mbool is true if any is set */ /* set one bool */ diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c index f27c4891082..b7537f70a79 100644 --- a/drivers/net/wireless/brcm80211/brcmutil/utils.c +++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/module.h> + #include <brcmu_utils.h> MODULE_AUTHOR("Broadcom Corporation"); @@ -40,74 +41,20 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb); /* Free the driver packet. 
Free the tag if present */ void brcmu_pkt_buf_free_skb(struct sk_buff *skb) { - struct sk_buff *nskb; - int nest = 0; - - /* perversion: we use skb->next to chain multi-skb packets */ - while (skb) { - nskb = skb->next; - skb->next = NULL; - - if (skb->destructor) - /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if - * destructor exists - */ - dev_kfree_skb_any(skb); - else - /* can free immediately (even in_irq()) if destructor - * does not exist - */ - dev_kfree_skb(skb); - - nest++; - skb = nskb; - } + WARN_ON(skb->next); + if (skb->destructor) + /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if + * destructor exists + */ + dev_kfree_skb_any(skb); + else + /* can free immediately (even in_irq()) if destructor + * does not exist + */ + dev_kfree_skb(skb); } EXPORT_SYMBOL(brcmu_pkt_buf_free_skb); - -/* copy a buffer into a pkt buffer chain */ -uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len, - unsigned char *buf) -{ - uint n, ret = 0; - - /* skip 'offset' bytes */ - for (; p && offset; p = p->next) { - if (offset < (uint) (p->len)) - break; - offset -= p->len; - } - - if (!p) - return 0; - - /* copy the data */ - for (; p && len; p = p->next) { - n = min((uint) (p->len) - offset, (uint) len); - memcpy(p->data + offset, buf, n); - buf += n; - len -= n; - ret += n; - offset = 0; - } - - return ret; -} -EXPORT_SYMBOL(brcmu_pktfrombuf); - -/* return total length of buffer chain */ -uint brcmu_pkttotlen(struct sk_buff *p) -{ - uint total; - - total = 0; - for (; p; p = p->next) - total += p->len; - return total; -} -EXPORT_SYMBOL(brcmu_pkttotlen); - /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence @@ -115,21 +62,13 @@ EXPORT_SYMBOL(brcmu_pkttotlen); struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p) { - struct pktq_prec *q; + struct sk_buff_head *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; - q = &pq->q[prec]; - - if (q->head) - q->tail->prev = p; - else - q->head = p; - - q->tail = p; - q->len++; - + q = &pq->q[prec].skblist; + skb_queue_tail(q, p); pq->len++; if (pq->hi_prec < prec) @@ -142,20 +81,13 @@ EXPORT_SYMBOL(brcmu_pktq_penq); struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec, struct sk_buff *p) { - struct pktq_prec *q; + struct sk_buff_head *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; - q = &pq->q[prec]; - - if (q->head == NULL) - q->tail = p; - - p->prev = q->head; - q->head = p; - q->len++; - + q = &pq->q[prec].skblist; + skb_queue_head(q, p); pq->len++; if (pq->hi_prec < prec) @@ -167,53 +99,30 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head); struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec) { - struct pktq_prec *q; + struct sk_buff_head *q; struct sk_buff *p; - q = &pq->q[prec]; - - p = q->head; + q = &pq->q[prec].skblist; + p = skb_dequeue(q); if (p == NULL) return NULL; - q->head = p->prev; - if (q->head == NULL) - q->tail = NULL; - - q->len--; - pq->len--; - - p->prev = NULL; - return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq); struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec) { - struct pktq_prec *q; - struct sk_buff *p, *prev; - - q = &pq->q[prec]; + struct sk_buff_head *q; + struct sk_buff *p; - p = q->head; + q = &pq->q[prec].skblist; + p = skb_dequeue_tail(q); if (p == NULL) return NULL; - for (prev = NULL; p != q->tail; p = p->prev) - prev = p; - - if (prev) - prev->prev = NULL; - else - q->head = NULL; - - q->tail = prev; - q->len--; - pq->len--; - return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq_tail); @@ 
-222,31 +131,17 @@ void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg) { - struct pktq_prec *q; - struct sk_buff *p, *prev = NULL; + struct sk_buff_head *q; + struct sk_buff *p, *next; - q = &pq->q[prec]; - p = q->head; - while (p) { + q = &pq->q[prec].skblist; + skb_queue_walk_safe(q, p, next) { if (fn == NULL || (*fn) (p, arg)) { - bool head = (p == q->head); - if (head) - q->head = p->prev; - else - prev->prev = p->prev; - p->prev = NULL; + skb_unlink(p, q); brcmu_pkt_buf_free_skb(p); - q->len--; pq->len--; - p = (head ? q->head : prev->prev); - } else { - prev = p; - p = p->prev; } } - - if (q->head == NULL) - q->tail = NULL; } EXPORT_SYMBOL(brcmu_pktq_pflush); @@ -271,8 +166,10 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len) pq->max = (u16) max_len; - for (prec = 0; prec < num_prec; prec++) + for (prec = 0; prec < num_prec; prec++) { pq->q[prec].max = pq->max; + skb_queue_head_init(&pq->q[prec].skblist); + } } EXPORT_SYMBOL(brcmu_pktq_init); @@ -284,13 +181,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) - if (pq->q[prec].head) + if (!skb_queue_empty(&pq->q[prec].skblist)) break; if (prec_out) *prec_out = prec; - return pq->q[prec].tail; + return skb_peek_tail(&pq->q[prec].skblist); } EXPORT_SYMBOL(brcmu_pktq_peek_tail); @@ -303,7 +200,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp) for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) - len += pq->q[prec].len; + len += pq->q[prec].skblist.qlen; return len; } @@ -313,39 +210,32 @@ EXPORT_SYMBOL(brcmu_pktq_mlen); struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { - struct pktq_prec *q; + struct sk_buff_head *q; struct sk_buff *p; int prec; if (pq->len == 0) return NULL; - while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + while ((prec = pq->hi_prec) > 0 && + skb_queue_empty(&pq->q[prec].skblist)) pq->hi_prec--; - while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + while ((prec_bmp & (1 << prec)) == 0 || + skb_queue_empty(&pq->q[prec].skblist)) if (prec-- == 0) return NULL; - q = &pq->q[prec]; - - p = q->head; + q = &pq->q[prec].skblist; + p = skb_dequeue(q); if (p == NULL) return NULL; - q->head = p->prev; - if (q->head == NULL) - q->tail = NULL; - - q->len--; + pq->len--; if (prec_out) *prec_out = prec; - pq->len--; - - p->prev = NULL; - return p; } EXPORT_SYMBOL(brcmu_pktq_mdeq); @@ -364,23 +254,3 @@ void brcmu_prpkt(const char *msg, struct sk_buff *p0) } EXPORT_SYMBOL(brcmu_prpkt); #endif /* defined(BCMDBG) */ - -#if defined(BCMDBG) -/* - * print bytes formatted as hex to a string. 
return the resulting - * string length - */ -int brcmu_format_hex(char *str, const void *bytes, int len) -{ - int i; - char *p = str; - const u8 *src = (const u8 *)bytes; - - for (i = 0; i < len; i++) { - p += snprintf(p, 3, "%02X", *src); - src++; - } - return (int)(p - str); -} -EXPORT_SYMBOL(brcmu_format_hex); -#endif /* defined(BCMDBG) */ diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h index 7d0f46e0eb9..ad249a0b473 100644 --- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h +++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h @@ -65,9 +65,7 @@ #define ETHER_ADDR_STR_LEN 18 struct pktq_prec { - struct sk_buff *head; /* first packet to dequeue */ - struct sk_buff *tail; /* last packet to dequeue */ - u16 len; /* number of queued packets */ + struct sk_buff_head skblist; u16 max; /* maximum number of queued packets */ }; @@ -88,32 +86,32 @@ struct pktq { static inline int pktq_plen(struct pktq *pq, int prec) { - return pq->q[prec].len; + return pq->q[prec].skblist.qlen; } static inline int pktq_pavail(struct pktq *pq, int prec) { - return pq->q[prec].max - pq->q[prec].len; + return pq->q[prec].max - pq->q[prec].skblist.qlen; } static inline bool pktq_pfull(struct pktq *pq, int prec) { - return pq->q[prec].len >= pq->q[prec].max; + return pq->q[prec].skblist.qlen >= pq->q[prec].max; } static inline bool pktq_pempty(struct pktq *pq, int prec) { - return pq->q[prec].len == 0; + return skb_queue_empty(&pq->q[prec].skblist); } static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec) { - return pq->q[prec].head; + return skb_peek(&pq->q[prec].skblist); } static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec) { - return pq->q[prec].tail; + return skb_peek_tail(&pq->q[prec].skblist); } extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, @@ -172,24 +170,16 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg); /* externs */ -/* packet */ -extern uint brcmu_pktfrombuf(struct sk_buff *p, - uint offset, int len, unsigned char *buf); -extern uint brcmu_pkttotlen(struct sk_buff *p); - /* ip address */ struct ipv4_addr; + +/* externs */ +/* format/print */ #ifdef BCMDBG extern void brcmu_prpkt(const char *msg, struct sk_buff *p0); #else #define brcmu_prpkt(a, b) #endif /* BCMDBG */ -/* externs */ -/* format/print */ -#if defined(BCMDBG) -extern int brcmu_format_hex(char *str, const void *bytes, int len); -#endif - #endif /* _BRCMU_UTILS_H_ */ diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h index fefabc39e64..f96834a7c05 100644 --- a/drivers/net/wireless/brcm80211/include/chipcommon.h +++ b/drivers/net/wireless/brcm80211/include/chipcommon.h @@ -19,6 +19,8 @@ #include "defs.h" /* for PAD macro */ +#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field) + struct chipcregs { u32 chipid; /* 0x0 */ u32 capabilities; diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h index 1e5f310af1e..f0d8c04a9c8 100644 --- a/drivers/net/wireless/brcm80211/include/defs.h +++ b/drivers/net/wireless/brcm80211/include/defs.h @@ -62,7 +62,6 @@ #define WL_RADIO_SW_DISABLE (1<<0) #define WL_RADIO_HW_DISABLE (1<<1) -#define WL_RADIO_MPC_DISABLE (1<<2) /* some countries don't support any channel */ #define WL_RADIO_COUNTRY_DISABLE (1<<3) diff --git a/drivers/net/wireless/brcm80211/include/soc.h 
b/drivers/net/wireless/brcm80211/include/soc.h index 4fcb956ad9e..4e9b7e4827e 100644 --- a/drivers/net/wireless/brcm80211/include/soc.h +++ b/drivers/net/wireless/brcm80211/include/soc.h @@ -77,8 +77,9 @@ #define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ #define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ #define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ -/* Default component, in ai chips it maps all unused address ranges */ -#define DEF_AI_COMP 0xfff +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it + * maps all unused address ranges + */ /* Common core control flags */ #define SICF_BIST_EN 0x8000 @@ -87,4 +88,11 @@ #define SICF_FGC 0x0002 #define SICF_CLOCK_EN 0x0001 +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + #endif /* _BRCM_SOC_H */ diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 5441ad19511..89e9d3a78c3 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = { "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02", 0xe6ec52ce, 0x08649af2, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID123( + "Canon", "Wireless LAN CF Card K30225", "Version 01.00", + 0x96ef6fe2, 0x263fcbab, 0xa57adb8c), + PCMCIA_DEVICE_PROD_ID123( "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02", 0x71b18589, 0xb6f1b0ab, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID123( diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index 045a93645a3..18054d9c668 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev, iface = netdev_priv(dev); local = iface->local; - strncpy(info->driver, "hostap", sizeof(info->driver) - 1); - snprintf(info->fw_version, sizeof(info->fw_version) - 1, + strlcpy(info->driver, "hostap", sizeof(info->driver)); + snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff, (local->sta_fw_ver >> 8) & 0xff, local->sta_fw_ver & 0xff); diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 127e9c63bea..a0e5c21d365 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, struct ipw2100_priv *priv = libipw_priv(dev); char fw_ver[64], ucode_ver[64]; - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); @@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", fw_ver, priv->eeprom_version, ucode_ver); - strcpy(info->bus_info, pci_name(priv->pci_dev)); + strlcpy(info->bus_info, pci_name(priv->pci_dev), + sizeof(info->bus_info)); } static u32 ipw2100_ethtool_get_link(struct net_device *dev) diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 99a710dfe77..018a8deb88a 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ 
b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -131,6 +131,14 @@ static struct ieee80211_rate ipw2200_rates[] = { #define ipw2200_bg_rates (ipw2200_rates + 0) #define ipw2200_num_bg_rates 12 +/* Ugly macro to convert literal channel numbers into their mhz equivalents + * There are certianly some conditions that will break this (like feeding it '30') + * but they shouldn't arise since nothing talks on channel 30. */ +#define ieee80211chan2mhz(x) \ + (((x) <= 14) ? \ + (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ + ((x) + 1000) * 5) + #ifdef CONFIG_IPW2200_QOS static int qos_enable = 0; static int qos_burst_enable = 0; @@ -10540,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, char date[32]; u32 len; - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); len = sizeof(vers); ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); @@ -10550,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", vers, date); - strcpy(info->bus_info, pci_name(p->pci_dev)); + strlcpy(info->bus_info, pci_name(p->pci_dev), + sizeof(info->bus_info)); info->eedump_len = IPW_EEPROM_IMAGE_SIZE; } diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h index 70f5586d96b..8874588fb92 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/ipw2x00/libipw.h @@ -66,16 +66,8 @@ extern u32 libipw_debug_level; do { if (libipw_debug_level & (level)) \ printk(KERN_DEBUG "libipw: %c %s " fmt, \ in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) -static inline bool libipw_ratelimit_debug(u32 level) -{ - return (libipw_debug_level & level) && net_ratelimit(); -} #else #define LIBIPW_DEBUG(level, fmt, args...) do {} while (0) -static inline bool libipw_ratelimit_debug(u32 level) -{ - return false; -} #endif /* CONFIG_LIBIPW_DEBUG */ /* @@ -813,9 +805,6 @@ struct libipw_device { /* WEP and other encryption related settings at the device level */ int open_wep; /* Set to 1 to allow unencrypted frames */ - int reset_on_keychange; /* Set to 1 if the HW needs to be reset on - * WEP key changes */ - /* If the host performs {en,de}cryption, then set to 1 */ int host_encrypt; int host_encrypt_msdu; @@ -868,7 +857,6 @@ struct libipw_device { struct libipw_security * sec); netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb, struct net_device * dev, int pri); - int (*reset_port) (struct net_device * dev); int (*is_queue_full) (struct net_device * dev, int pri); int (*handle_management) (struct net_device * dev, diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c index 6623e505225..1571505b1a3 100644 --- a/drivers/net/wireless/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/ipw2x00/libipw_wx.c @@ -474,17 +474,6 @@ int libipw_wx_set_encode(struct libipw_device *ieee, if (ieee->set_security) ieee->set_security(dev, &sec); - /* Do not reset port if card is in Managed mode since resetting will - * generate new IEEE 802.11 authentication which may end up in looping - * with IEEE 802.1X. If your hardware requires a reset after WEP - * configuration (for example... Prism2), implement the reset_port in - * the callbacks structures used to initialize the 802.11 stack. 
*/ - if (ieee->reset_on_keychange && - ieee->iw_mode != IW_MODE_INFRA && - ieee->reset_port && ieee->reset_port(dev)) { - printk(KERN_DEBUG "%s: reset_port failed\n", dev->name); - return -EINVAL; - } return 0; } @@ -688,20 +677,6 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, if (ieee->set_security) ieee->set_security(ieee->dev, &sec); - /* - * Do not reset port if card is in Managed mode since resetting will - * generate new IEEE 802.11 authentication which may end up in looping - * with IEEE 802.1X. If your hardware requires a reset after WEP - * configuration (for example... Prism2), implement the reset_port in - * the callbacks structures used to initialize the 802.11 stack. - */ - if (ieee->reset_on_keychange && - ieee->iw_mode != IW_MODE_INFRA && - ieee->reset_port && ieee->reset_port(dev)) { - LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name); - return -EINVAL; - } - return ret; } diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c new file mode 100644 index 00000000000..5e1a19fd354 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/3945-debug.c @@ -0,0 +1,505 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "common.h" +#include "3945.h" + +static int +il3945_stats_flag(struct il_priv *il, char *buf, int bufsz) +{ + int p = 0; + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", + le32_to_cpu(il->_3945.stats.flag)); + if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (le32_to_cpu(il->_3945.stats.flag) & + UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (le32_to_cpu(il->_3945.stats.flag) & + UCODE_STATS_NARROW_BAND_MSK) ? 
"enabled" : "disabled"); + return p; +} + +ssize_t +il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0; + char *buf; + int bufsz = + sizeof(struct iwl39_stats_rx_phy) * 40 + + sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400; + ssize_t ret; + struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct iwl39_stats_rx_non_phy *general, *accum_general; + struct iwl39_stats_rx_non_phy *delta_general, *max_general; + + if (!il_is_alive(il)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last stats notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &il->_3945.stats.rx.ofdm; + cck = &il->_3945.stats.rx.cck; + general = &il->_3945.stats.rx.general; + accum_ofdm = &il->_3945.accum_stats.rx.ofdm; + accum_cck = &il->_3945.accum_stats.rx.cck; + accum_general = &il->_3945.accum_stats.rx.general; + delta_ofdm = &il->_3945.delta_stats.rx.ofdm; + delta_cck = &il->_3945.delta_stats.rx.cck; + delta_general = &il->_3945.delta_stats.rx.general; + max_ofdm = &il->_3945.max_delta.rx.ofdm; + max_cck = &il->_3945.max_delta.rx.cck; + max_general = &il->_3945.max_delta.rx.general; + + pos += il3945_stats_flag(il, buf, bufsz); + pos += + scnprintf(buf + pos, bufsz - pos, + "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - OFDM:"); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "overrun_err:", + le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, + delta_ofdm->overrun_err, max_ofdm->overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "crc32_good:", + le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, + delta_ofdm->crc32_good, max_ofdm->crc32_good); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u 
%10u %10u %10u\n", "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, + delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, + delta_ofdm->fina_timeout, max_ofdm->fina_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, + delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, + delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); + + pos += + scnprintf(buf + pos, bufsz - pos, + "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - CCK:"); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "overrun_err:", + le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, + delta_cck->overrun_err, max_cck->overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, max_cck->early_overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, + max_cck->false_alarm_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, + delta_cck->sfd_timeout, max_cck->sfd_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "fina_timeout:", + le32_to_cpu(cck->fina_timeout), 
accum_cck->fina_timeout, + delta_cck->fina_timeout, max_cck->fina_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, + delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, + delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); + + pos += + scnprintf(buf + pos, bufsz - pos, + "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - GENERAL:"); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "bogus_cts:", + le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, + delta_general->bogus_cts, max_general->bogus_cts); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "bogus_ack:", + le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, + delta_general->bogus_ack, max_general->bogus_ack); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250; + ssize_t ret; + struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!il_is_alive(il)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last stats notification from uCode + * might not reflect the current uCode activity + */ + tx = &il->_3945.stats.tx; + accum_tx = &il->_3945.accum_stats.tx; + delta_tx = &il->_3945.delta_stats.tx; + max_tx = &il->_3945.max_delta.tx; + pos += il3945_stats_flag(il, buf, bufsz); + pos += + scnprintf(buf + pos, bufsz - pos, + "%-32s current" + "acumulative delta max\n", + "Statistics_Tx:"); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "preamble:", + le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u 
%10u %10u %10u\n", "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, + max_tx->rx_detected_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "ack_timeout:", + le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +il3945_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300; + ssize_t ret; + struct iwl39_stats_general *general, *accum_general; + struct iwl39_stats_general *delta_general, *max_general; + struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div; + + if (!il_is_alive(il)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last stats notification from uCode + * might not reflect the current uCode activity + */ + general = &il->_3945.stats.general; + dbg = &il->_3945.stats.general.dbg; + div = &il->_3945.stats.general.div; + accum_general = &il->_3945.accum_stats.general; + delta_general = &il->_3945.delta_stats.general; + max_general = &il->_3945.max_delta.general; + accum_dbg = &il->_3945.accum_stats.general.dbg; + delta_dbg = &il->_3945.delta_stats.general.dbg; + max_dbg = &il->_3945.max_delta.general.dbg; + accum_div = &il->_3945.accum_stats.general.div; + delta_div = &il->_3945.delta_stats.general.div; + max_div = &il->_3945.max_delta.general.div; + pos += il3945_stats_flag(il, buf, bufsz); + pos += + scnprintf(buf + pos, bufsz - pos, + "%-32s current" + "acumulative delta max\n", + "Statistics_General:"); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "burst_check:", + le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u 
%10u\n", "burst_count:", + le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, delta_general->sleep_time, + max_general->sleep_time); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "slots_out:", + le32_to_cpu(general->slots_out), accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, delta_general->slots_idle, + max_general->slots_idle); + pos += + scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", + le32_to_cpu(general->ttl_timestamp)); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += + scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c new file mode 100644 index 00000000000..54b2d391e91 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -0,0 +1,3976 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/ieee80211_radiotap.h> +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define DRV_NAME "iwl3945" + +#include "commands.h" +#include "common.h" +#include "3945.h" +#include "iwl-spectrum.h" + +/* + * module name, copyright, version, etc. + */ + +#define DRV_DESCRIPTION \ +"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" + +#ifdef CONFIG_IWLEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +/* + * add "s" to indicate spectrum measurement included. + * we add it here to be consistent with previous releases in which + * this was configurable. + */ +#define DRV_VERSION IWLWIFI_VERSION VD "s" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + + /* module parameters */ +struct il_mod_params il3945_mod_params = { + .sw_crypto = 1, + .restart_fw = 1, + .disable_hw_scan = 1, + /* the rest are 0 by default */ +}; + +/** + * il3945_get_antenna_flags - Get antenna flags for RXON command + * @il: eeprom and antenna fields are used to determine antenna flags + * + * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed + * il3945_mod_params.antenna specifies the antenna diversity mode: + * + * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself + * IL_ANTENNA_MAIN - Force MAIN antenna + * IL_ANTENNA_AUX - Force AUX antenna + */ +__le32 +il3945_get_antenna_flags(const struct il_priv *il) +{ + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + + switch (il3945_mod_params.antenna) { + case IL_ANTENNA_DIVERSITY: + return 0; + + case IL_ANTENNA_MAIN: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + + case IL_ANTENNA_AUX: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + } + + /* bad antenna selector value */ + IL_ERR("Bad antenna selector value (0x%x)\n", + il3945_mod_params.antenna); + + return 0; /* "diversity" is default if error */ +} + +static int +il3945_set_ccmp_dynamic_key_info(struct il_priv *il, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + int ret; + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + + if (sta_id == il->ctx.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = keyconf->keyidx; + key_flags &= ~STA_KEY_FLG_INVALID; + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].keyinfo.cipher = keyconf->cipher; + il->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); + + 
memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); + + if ((il->stations[sta_id].sta.key. + key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) + il->stations[sta_id].sta.key.key_offset = + il_get_free_ucode_key_idx(il); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + il->stations[sta_id].sta.key.key_flags = key_flags; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + D_INFO("hwcrypto: modify ucode station key info\n"); + + ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&il->sta_lock, flags); + + return ret; +} + +static int +il3945_set_tkip_dynamic_key_info(struct il_priv *il, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int +il3945_set_wep_dynamic_key_info(struct il_priv *il, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int +il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id) +{ + unsigned long flags; + struct il_addsta_cmd sta_cmd; + + spin_lock_irqsave(&il->sta_lock, flags); + memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); + memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); + il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + D_INFO("hwcrypto: clear ucode station key info\n"); + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +static int +il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + int ret = 0; + + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id); + break; + default: + IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher); + ret = -EINVAL; + } + + D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); + + return ret; +} + +static int +il3945_remove_static_key(struct il_priv *il) +{ + int ret = -EOPNOTSUPP; + + return ret; +} + +static int +il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key) +{ + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) + return -EOPNOTSUPP; + + IL_ERR("Static key invalid: cipher %x\n", key->cipher); + return -EINVAL; +} + +static void +il3945_clear_free_frames(struct il_priv *il) +{ + struct list_head *element; + + D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); + + while (!list_empty(&il->free_frames)) { + element = il->free_frames.next; + list_del(element); + kfree(list_entry(element, struct il3945_frame, list)); + il->frames_count--; + } + + if (il->frames_count) { + IL_WARN("%d frames still in use. 
Did we lose one?\n", + il->frames_count); + il->frames_count = 0; + } +} + +static struct il3945_frame * +il3945_get_free_frame(struct il_priv *il) +{ + struct il3945_frame *frame; + struct list_head *element; + if (list_empty(&il->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IL_ERR("Could not allocate frame!\n"); + return NULL; + } + + il->frames_count++; + return frame; + } + + element = il->free_frames.next; + list_del(element); + return list_entry(element, struct il3945_frame, list); +} + +static void +il3945_free_frame(struct il_priv *il, struct il3945_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &il->free_frames); +} + +unsigned int +il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, + int left) +{ + + if (!il_is_associated(il) || !il->beacon_skb) + return 0; + + if (il->beacon_skb->len > left) + return 0; + + memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); + + return il->beacon_skb->len; +} + +static int +il3945_send_beacon_cmd(struct il_priv *il) +{ + struct il3945_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = il3945_get_free_frame(il); + + if (!frame) { + IL_ERR("Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + rate = il_get_lowest_plcp(il, &il->ctx); + + frame_size = il3945_hw_get_beacon_cmd(il, frame, rate); + + rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); + + il3945_free_frame(il, frame); + + return rc; +} + +static void +il3945_unset_hw_params(struct il_priv *il) +{ + if (il->_3945.shared_virt) + dma_free_coherent(&il->pci_dev->dev, + sizeof(struct il3945_shared), + il->_3945.shared_virt, il->_3945.shared_phys); +} + +static void +il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, + struct il_device_cmd *cmd, + struct sk_buff *skb_frag, int sta_id) +{ + struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; + struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo; + + tx_cmd->sec_ctl = 0; + + switch (keyinfo->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); + D_TX("tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= + TX_CMD_SEC_WEP | (info->control.hw_key-> + hw_key_idx & TX_CMD_SEC_MSK) << + TX_CMD_SEC_SHIFT; + + memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); + + D_TX("Configuring packet for WEP encryption " "with key %d\n", + info->control.hw_key->hw_key_idx); + break; + + default: + IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher); + break; + } +} + +/* + * handle build C_TX command notification. 
+ */ +static void +il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, u8 std_id) +{ + struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; + __le32 tx_flags = tx_cmd->tx_flags; + __le16 fc = hdr->frame_control; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + il_tx_cmd_protection(il, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +/* + * start C_TX command process + */ +static int +il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct il3945_tx_cmd *tx_cmd; + struct il_tx_queue *txq = NULL; + struct il_queue *q = NULL; + struct il_device_cmd *out_cmd; + struct il_cmd_meta *out_meta; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + int txq_id = skb_get_queue_mapping(skb); + u16 len, idx, hdr_len; + u8 id; + u8 unicast; + u8 sta_id; + u8 tid = 0; + __le16 fc; + u8 wait_write_ptr = 0; + unsigned long flags; + + spin_lock_irqsave(&il->lock, flags); + if (il_is_rfkill(il)) { + D_DROP("Dropping - RF KILL\n"); + goto drop_unlock; + } + + if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) == + IL_INVALID_RATE) { + IL_ERR("ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLEGACY_DEBUG + if (ieee80211_is_auth(fc)) + D_TX("Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + D_TX("Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + D_TX("Sending REASSOC frame\n"); +#endif + + spin_unlock_irqrestore(&il->lock, flags); + + hdr_len = ieee80211_hdrlen(fc); + + /* Find idx into station table for destination station */ + sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta); + if (sta_id == IL_INVALID_STATION) { + D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); + goto drop; + } + + D_RATE("station Id %d\n", sta_id); + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (unlikely(tid >= MAX_TID_COUNT)) + goto drop; + } + + /* Descriptor for chosen Tx queue */ + txq = &il->txq[txq_id]; + q = &txq->q; + + if ((il_queue_space(q) < q->high_mark)) + goto drop; + + spin_lock_irqsave(&il->lock, flags); + + idx = il_get_cmd_idx(q, q->write_ptr, 0); + + /* Set up driver data for this TFD */ + 
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = &il->ctx; + + /* Init first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(*tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD idx within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = C_TX; + out_cmd->hdr.sequence = + cpu_to_le16((u16) + (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + if (info->control.hw_key) + il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id); + + il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id); + + /* Total # bytes to be transmitted */ + len = (u16) skb->len; + tx_cmd->len = cpu_to_le16(len); + + il_dbg_log_tx_data_frame(il, len, hdr); + il_update_stats(il, true, fc, len); + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); + D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); + il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, + ieee80211_hdrlen(fc)); + + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = + sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + + hdr_len; + len = (len + 3) & ~3; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = + pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE); + /* we do not map meta data ... so we can safely access address to + * provide to unmap command*/ + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, len); + + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, + 0); + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). 
*/ + len = skb->len - hdr_len; + if (len) { + phys_addr = + pci_map_single(il->pci_dev, skb->data + hdr_len, len, + PCI_DMA_TODEVICE); + il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, + len, 0, U32_PAD(len)); + } + + /* Tell device the write idx *just past* this latest filled TFD */ + q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); + il_txq_update_write_ptr(il, txq); + spin_unlock_irqrestore(&il->lock, flags); + + if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&il->lock, flags); + txq->need_update = 1; + il_txq_update_write_ptr(il, txq); + spin_unlock_irqrestore(&il->lock, flags); + } + + il_stop_queue(il, txq); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&il->lock, flags); +drop: + return -1; +} + +static int +il3945_get_measurement(struct il_priv *il, + struct ieee80211_measurement_params *params, u8 type) +{ + struct il_spectrum_cmd spectrum; + struct il_rx_pkt *pkt; + struct il_host_cmd cmd = { + .id = C_SPECTRUM_MEASUREMENT, + .data = (void *)&spectrum, + .flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + struct il_rxon_context *ctx = &il->ctx; + + if (il_is_associated(il)) + add_time = + il_usecs_to_beacons(il, + le64_to_cpu(params->start_time) - + il->_3945.last_tsf, + le16_to_cpu(ctx->timing. + beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (il_is_associated(il)) + spectrum.start_time = + il_add_beacon_time(il, il->_3945.last_beacon_time, add_time, + le16_to_cpu(ctx->timing. 
+ beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= + RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK; + + rc = il_send_cmd_sync(il, &cmd); + if (rc) + return rc; + + pkt = (struct il_rx_pkt *)cmd.reply_page; + if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { + IL_ERR("Bad return from N_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (pkt->u.spectrum.id != 0xff) { + D_INFO("Replaced existing measurement: %d\n", + pkt->u.spectrum.id); + il->measurement_status &= ~MEASUREMENT_READY; + } + il->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + il_free_pages(il, cmd.reply_page); + + return rc; +} + +static void +il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + D_INFO("Initialization Alive received.\n"); + memcpy(&il->card_alive_init, &pkt->u.alive_frame, + sizeof(struct il_alive_resp)); + pwork = &il->init_alive_start; + } else { + D_INFO("Runtime Alive received.\n"); + memcpy(&il->card_alive, &pkt->u.alive_frame, + sizeof(struct il_alive_resp)); + pwork = &il->alive_start; + il3945_disable_events(il); + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); + else + IL_WARN("uCode did not respond OK.\n"); +} + +static void +il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) +{ +#ifdef CONFIG_IWLEGACY_DEBUG + struct il_rx_pkt *pkt = rxb_addr(rxb); +#endif + + D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); +} + +static void +il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status); +#ifdef CONFIG_IWLEGACY_DEBUG + u8 rate = beacon->beacon_notify_hdr.rate; + + D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); +#endif + + il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); + +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void +il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = il->status; + + IL_WARN("Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? 
"Kill" : "On"); + + _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (flags & HW_CARD_DISABLED) + set_bit(S_RF_KILL_HW, &il->status); + else + clear_bit(S_RF_KILL_HW, &il->status); + + il_scan_cancel(il); + + if ((test_bit(S_RF_KILL_HW, &status) != + test_bit(S_RF_KILL_HW, &il->status))) + wiphy_rfkill_set_hw_state(il->hw->wiphy, + test_bit(S_RF_KILL_HW, &il->status)); + else + wake_up(&il->wait_command_queue); +} + +/** + * il3945_setup_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void +il3945_setup_handlers(struct il_priv *il) +{ + il->handlers[N_ALIVE] = il3945_hdl_alive; + il->handlers[C_ADD_STA] = il3945_hdl_add_sta; + il->handlers[N_ERROR] = il_hdl_error; + il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; + il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; + il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; + il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; + il->handlers[N_BEACON] = il3945_hdl_beacon; + + /* + * The same handler is used for both the REPLY to a discrete + * stats request from the host as well as for the periodic + * stats notifications (after received beacons) from the uCode. + */ + il->handlers[C_STATS] = il3945_hdl_c_stats; + il->handlers[N_STATS] = il3945_hdl_stats; + + il_setup_rx_scan_handlers(il); + il->handlers[N_CARD_STATE] = il3945_hdl_card_state; + + /* Set up hardware specific Rx handlers */ + il3945_hw_handler_setup(il); +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two idx registers for managing the Rx buffers. + * + * The READ idx maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ idx is managed by the firmware once the card is enabled. + * + * The WRITE idx maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * IDX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ idx + * and fire the RX interrupt. The driver can then query the READ idx and + * process as many packets as possible, moving the WRITE idx forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ IDX is updated (updating the + * 'processed' and 'read' driver idxes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' idx is updated. 
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * IDX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls + * il3945_rx_queue_restock + * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE idx. If insufficient rx_free buffers + * are available, schedules il3945_rx_replenish + * + * -- enable interrupts -- + * ISR - il3945_rx() Detach il_rx_bufs from pool up to the + * READ IDX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls il3945_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 +il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr) +{ + return cpu_to_le32((u32) dma_addr); +} + +/** + * il3945_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' idx forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +static void +il3945_rx_queue_restock(struct il_priv *il) +{ + struct il_rx_queue *rxq = &il->rxq; + struct list_head *element; + struct il_rx_buf *rxb; + unsigned long flags; + int write; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct il_rx_buf, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = + il3945_dma_addr2rbd_ptr(il, rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(il->workqueue, &il->rx_replenish); + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7) || + abs(rxq->write - rxq->read) > 7) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + il_rx_queue_update_write_ptr(il, rxq); + } +} + +/** + * il3945_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via il3945_rx_queue_restock. 
+ * This is called as a scheduled work item (except for during initialization) + */ +static void +il3945_rx_allocate(struct il_priv *il, gfp_t priority) +{ + struct il_rx_queue *rxq = &il->rxq; + struct list_head *element; + struct il_rx_buf *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (il->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + D_INFO("Failed to allocate SKB buffer.\n"); + if (rxq->free_count <= RX_LOW_WATERMARK && + net_ratelimit()) + IL_ERR("Failed to allocate SKB buffer with %0x." + "Only %u free buffers remaining.\n", + priority, rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, il->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct il_rx_buf, list); + list_del(element); + spin_unlock_irqrestore(&rxq->lock, flags); + + rxb->page = page; + /* Get physical address of RB/SKB */ + rxb->page_dma = + pci_map_page(il->pci_dev, page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + il->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void +il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __il_free_pages(il, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +void +il3945_rx_replenish(void *data) +{ + struct il_priv *il = data; + unsigned long flags; + + il3945_rx_allocate(il, GFP_KERNEL); + + spin_lock_irqsave(&il->lock, flags); + il3945_rx_queue_restock(il); + spin_unlock_irqrestore(&il->lock, flags); +} + +static void +il3945_rx_replenish_now(struct il_priv *il) +{ + il3945_rx_allocate(il, GFP_ATOMIC); + + il3945_rx_queue_restock(il); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +static void +il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __il_free_pages(il, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ +int +il3945_calc_db_from_ratio(int sig_ratio) +{ + /* 1000:1 or higher just report as 60 dB */ + if (sig_ratio >= 1000) + return 60; + + /* 100:1 or higher, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio >= 100) + return 20 + (int)ratio2dB[sig_ratio / 10]; + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +/** + * il3945_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the il->handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void +il3945_rx_handle(struct il_priv *il) +{ + struct il_rx_buf *rxb; + struct il_rx_pkt *pkt; + struct il_rx_queue *rxq = &il->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty = 0; + + /* uCode's read idx (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
*/ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + D_RX("r = %d, i = %d\n", r, i); + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(il->pci_dev, rxb->page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX; + + /* Based on type of command response or notification, + * handle those that need handling via function in + * handlers table. See il3945_setup_handlers() */ + if (il->handlers[pkt->hdr.cmd]) { + D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, + il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + il->isr_stats.handlers[pkt->hdr.cmd]++; + il->handlers[pkt->hdr.cmd] (il, rxb); + } else { + /* No handling needed */ + D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, + i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking il_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + il_tx_cmd_complete(il, rxb); + else + IL_WARN("Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = + pci_map_page(il->pci_dev, rxb->page, 0, + PAGE_SIZE << il->hw_params. + rx_page_order, PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode won't assert. 
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + il3945_rx_replenish_now(il); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + il3945_rx_replenish_now(il); + else + il3945_rx_queue_restock(il); +} + +/* call this function to flush any scheduled tasklet */ +static inline void +il3945_synchronize_irq(struct il_priv *il) +{ + /* wait to make sure we flush pending tasklet */ + synchronize_irq(il->pci_dev->irq); + tasklet_kill(&il->irq_tasklet); +} + +static const char * +il3945_desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void +il3945_dump_nic_error_log(struct il_priv *il) +{ + u32 i; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + + base = le32_to_cpu(il->card_alive.error_event_table_ptr); + + if (!il3945_hw_valid_rtc_data_addr(base)) { + IL_ERR("Not valid error log pointer 0x%08X\n", base); + return; + } + + count = il_read_targ_mem(il, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IL_ERR("Start IWL Error Log Dump:\n"); + IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); + } + + IL_ERR("Desc Time asrtPC blink2 " + "ilink1 nmiPC Line\n"); + for (i = ERROR_START_OFFSET; + i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; + i += ERROR_ELEM_SIZE) { + desc = il_read_targ_mem(il, base + i); + time = il_read_targ_mem(il, base + i + 1 * sizeof(u32)); + blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32)); + blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32)); + ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32)); + ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32)); + data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32)); + + IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + il3945_desc_lookup(desc), desc, time, blink1, blink2, + ilink1, ilink2, data1); + } +} + +static void +il3945_irq_tasklet(struct il_priv *il) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&il->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = _il_rd(il, CSR_INT); + _il_wr(il, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = _il_rd(il, CSR_FH_INT_STATUS); + _il_wr(il, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & IL_DL_ISR) { + /* just for debug */ + inta_mask = _il_rd(il, CSR_INT_MASK); + D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, + inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&il->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. 
*/ + if (inta_fh & CSR39_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR39_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IL_ERR("Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + il_disable_interrupts(il); + + il->isr_stats.hw++; + il_irq_handle_error(il); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & (IL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + D_ISR("Scheduler finished to transmit " + "the frame/frames.\n"); + il->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + D_ISR("Alive interrupt\n"); + il->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n", + inta); + il->isr_stats.sw++; + il_irq_handle_error(il); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + D_ISR("Wakeup interrupt\n"); + il_rx_queue_update_write_ptr(il, &il->rxq); + il_txq_update_write_ptr(il, &il->txq[0]); + il_txq_update_write_ptr(il, &il->txq[1]); + il_txq_update_write_ptr(il, &il->txq[2]); + il_txq_update_write_ptr(il, &il->txq[3]); + il_txq_update_write_ptr(il, &il->txq[4]); + il_txq_update_write_ptr(il, &il->txq[5]); + + il->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + il3945_rx_handle(il); + il->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + D_ISR("Tx interrupt\n"); + il->isr_stats.tx++; + + _il_wr(il, CSR_FH_INT_STATUS, (1 << 6)); + il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0); + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) { + IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + il->isr_stats.unhandled++; + } + + if (inta & ~il->inta_mask) { + IL_WARN("Disabled INTA bits 0x%08x were pending\n", + inta & ~il->inta_mask); + IL_WARN(" with inta_fh = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(S_INT_ENABLED, &il->status)) + il_enable_interrupts(il); + +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & (IL_DL_ISR)) { + inta = _il_rd(il, CSR_INT); + inta_mask = _il_rd(il, CSR_INT_MASK); + inta_fh = _il_rd(il, CSR_FH_INT_STATUS); + D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +static int +il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct il3945_scan_channel *scan_ch, + struct ieee80211_vif *vif) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct il_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + sband = il_get_hw_mode(il, band); + if (!sband) + return 0; + + active_dwell = il_get_active_dwell_time(il, band, n_probes); + 
passive_dwell = il_get_passive_dwell_time(il, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { + chan = il->scan_request->channels[i]; + + if (chan->band != band) + continue; + + scan_ch->channel = chan->hw_value; + + ch_info = il_get_channel_info(il, band, scan_ch->channel); + if (!il_is_channel_valid(ch_info)) { + D_SCAN("Channel %d is INVALID for this band.\n", + scan_ch->channel); + continue; + } + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* If passive , set up for auto-switch + * and use long active_dwell time. + */ + if (!is_active || il_is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { + scan_ch->type = 0; /* passive */ + if (IL_UCODE_API(il->ucode_ver) == 1) + scan_ch->active_dwell = + cpu_to_le16(passive_dwell - 1); + } else { + scan_ch->type = 1; /* active */ + } + + /* Set direct probe bits. These may be used both for active + * scan channels (probes gets sent right away), + * or for passive channels (probes get se sent only after + * hearing clear Rx packet).*/ + if (IL_UCODE_API(il->ucode_ver) >= 2) { + if (n_probes) + scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes); + } else { + /* uCode v1 does not allow setting direct probe bits on + * passive channel. */ + if ((scan_ch->type & 1) && n_probes) + scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes); + } + + /* Set txpower levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + } + + D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + D_SCAN("total channels to scan %d\n", added); + return added; +} + +static void +il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = il3945_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on idxes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (il3945_rates[i].plcp == + 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void +il3945_dealloc_ucode_pci(struct il_priv *il) +{ + il_free_fw_desc(il->pci_dev, &il->ucode_code); + il_free_fw_desc(il->pci_dev, &il->ucode_data); + il_free_fw_desc(il->pci_dev, &il->ucode_data_backup); + il_free_fw_desc(il->pci_dev, &il->ucode_init); + il_free_fw_desc(il->pci_dev, &il->ucode_init_data); + il_free_fw_desc(il->pci_dev, &il->ucode_boot); +} + +/** + * il3945_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. 
+ */ +static int +il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + D_INFO("ucode inst image size is %u\n", len); + + il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IL_DL_IO is set */ + val = _il_rd(il, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IL_ERR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + D_INFO("ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + +/** + * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int +il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + D_INFO("ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IL_DL_IO is set */ + il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND); + val = _il_rd(il, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IL_ERR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val, + *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return rc; +} + +/** + * il3945_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int +il3945_verify_ucode(struct il_priv *il) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *) il->ucode_boot.v_addr; + len = il->ucode_boot.len; + rc = il3945_verify_inst_sparse(il, image, len); + if (rc == 0) { + D_INFO("Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *) il->ucode_init.v_addr; + len = il->ucode_init.len; + rc = il3945_verify_inst_sparse(il, image, len); + if (rc == 0) { + D_INFO("Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *) il->ucode_code.v_addr; + len = il->ucode_code.len; + rc = il3945_verify_inst_sparse(il, image, len); + if (rc == 0) { + D_INFO("Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ + image = (__le32 *) il->ucode_boot.v_addr; + len = il->ucode_boot.len; + rc = il3945_verify_inst_full(il, image, len); + + return rc; +} + +static void +il3945_nic_start(struct il_priv *il) +{ + /* Remove all resets to allow NIC to operate */ + _il_wr(il, CSR_RESET, 0); +} + +#define IL3945_UCODE_GET(item) \ +static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\ +{ \ + return le32_to_cpu(ucode->v1.item); \ +} + +static u32 +il3945_ucode_get_header_size(u32 api_ver) +{ + return 24; +} + +static u8 * +il3945_ucode_get_data(const struct il_ucode_header *ucode) +{ + return (u8 *) ucode->v1.data; +} + +IL3945_UCODE_GET(inst_size); +IL3945_UCODE_GET(data_size); +IL3945_UCODE_GET(init_size); +IL3945_UCODE_GET(init_data_size); +IL3945_UCODE_GET(boot_size); + +/** + * il3945_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int +il3945_read_ucode(struct il_priv *il) +{ + const struct il_ucode_header *ucode; + int ret = -EINVAL, idx; + const struct firmware *ucode_raw; + /* firmware file name contains uCode/driver compatibility version */ + const char *name_pre = il->cfg->fw_name_pre; + const unsigned int api_max = il->cfg->ucode_api_max; + const unsigned int api_min = il->cfg->ucode_api_min; + char buf[25]; + u8 *src; + size_t len; + u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + for (idx = api_max; idx >= api_min; idx--) { + sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); + ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); + if (ret < 0) { + IL_ERR("%s firmware file req failed: %d\n", buf, ret); + if (ret == -ENOENT) + continue; + else + goto error; + } else { + if (idx < api_max) + IL_ERR("Loaded firmware %s, " + "which is deprecated. " + " Please use API v%u instead.\n", buf, + api_max); + D_INFO("Got firmware '%s' file " + "(%zd bytes) from disk\n", buf, ucode_raw->size); + break; + } + } + + if (ret < 0) + goto error; + + /* Make sure that we got at least our header! */ + if (ucode_raw->size < il3945_ucode_get_header_size(1)) { + IL_ERR("File size way too small!\n"); + ret = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct il_ucode_header *)ucode_raw->data; + + il->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IL_UCODE_API(il->ucode_ver); + inst_size = il3945_ucode_get_inst_size(ucode); + data_size = il3945_ucode_get_data_size(ucode); + init_size = il3945_ucode_get_init_size(ucode); + init_data_size = il3945_ucode_get_init_data_size(ucode); + boot_size = il3945_ucode_get_boot_size(ucode); + src = il3945_ucode_get_data(ucode); + + /* api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward */ + + if (api_ver < api_min || api_ver > api_max) { + IL_ERR("Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", api_max, + api_ver); + il->ucode_ver = 0; + ret = -EINVAL; + goto err_release; + } + if (api_ver != api_max) + IL_ERR("Firmware has old API version. Expected %u, " + "got %u. 
New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", api_max, + api_ver); + + IL_INFO("loaded firmware version %u.%u.%u.%u\n", + IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), + IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); + + snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), + "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), + IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), + IL_UCODE_SERIAL(il->ucode_ver)); + + D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); + D_INFO("f/w package hdr runtime inst size = %u\n", inst_size); + D_INFO("f/w package hdr runtime data size = %u\n", data_size); + D_INFO("f/w package hdr init inst size = %u\n", init_size); + D_INFO("f/w package hdr init data size = %u\n", init_data_size); + D_INFO("f/w package hdr boot inst size = %u\n", boot_size); + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != + il3945_ucode_get_header_size(api_ver) + inst_size + data_size + + init_size + init_data_size + boot_size) { + + D_INFO("uCode file size %zd does not match expected size\n", + ucode_raw->size); + ret = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IL39_MAX_INST_SIZE) { + D_INFO("uCode instr len %d too large to fit in\n", inst_size); + ret = -EINVAL; + goto err_release; + } + + if (data_size > IL39_MAX_DATA_SIZE) { + D_INFO("uCode data len %d too large to fit in\n", data_size); + ret = -EINVAL; + goto err_release; + } + if (init_size > IL39_MAX_INST_SIZE) { + D_INFO("uCode init instr len %d too large to fit in\n", + init_size); + ret = -EINVAL; + goto err_release; + } + if (init_data_size > IL39_MAX_DATA_SIZE) { + D_INFO("uCode init data len %d too large to fit in\n", + init_data_size); + ret = -EINVAL; + goto err_release; + } + if (boot_size > IL39_MAX_BSM_SIZE) { + D_INFO("uCode boot instr len %d too large to fit in\n", + boot_size); + ret = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... */ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + il->ucode_code.len = inst_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_code); + + il->ucode_data.len = data_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_data); + + il->ucode_data_backup.len = data_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); + + if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || + !il->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (init_size && init_data_size) { + il->ucode_init.len = init_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_init); + + il->ucode_init_data.len = init_data_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); + + if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (boot_size) { + il->ucode_boot.len = boot_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); + + if (!il->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Copy images into buffers for card's bus-master reads ... 
*/ + + /* Runtime instructions (first block of data in file) */ + len = inst_size; + D_INFO("Copying (but not loading) uCode instr len %zd\n", len); + memcpy(il->ucode_code.v_addr, src, len); + src += len; + + D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in il3945_up() */ + len = data_size; + D_INFO("Copying (but not loading) uCode data len %zd\n", len); + memcpy(il->ucode_data.v_addr, src, len); + memcpy(il->ucode_data_backup.v_addr, src, len); + src += len; + + /* Initialization instructions (3rd block) */ + if (init_size) { + len = init_size; + D_INFO("Copying (but not loading) init instr len %zd\n", len); + memcpy(il->ucode_init.v_addr, src, len); + src += len; + } + + /* Initialization data (4th block) */ + if (init_data_size) { + len = init_data_size; + D_INFO("Copying (but not loading) init data len %zd\n", len); + memcpy(il->ucode_init_data.v_addr, src, len); + src += len; + } + + /* Bootstrap instructions (5th block) */ + len = boot_size; + D_INFO("Copying (but not loading) boot instr len %zd\n", len); + memcpy(il->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + +err_pci_alloc: + IL_ERR("failed to allocate pci memory\n"); + ret = -ENOMEM; + il3945_dealloc_ucode_pci(il); + +err_release: + release_firmware(ucode_raw); + +error: + return ret; +} + +/** + * il3945_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int +il3945_set_ucode_ptrs(struct il_priv *il) +{ + dma_addr_t pinst; + dma_addr_t pdata; + + /* bits 31:0 for 3945 */ + pinst = il->ucode_code.p_addr; + pdata = il->ucode_data_backup.p_addr; + + /* Tell bootstrap uCode where to find image to load */ + il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); + il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); + il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, + il->ucode_code.len | BSM_DRAM_INST_LOAD); + + D_INFO("Runtime uCode pointers are set.\n"); + + return 0; +} + +/** + * il3945_init_alive_start - Called after N_ALIVE notification received + * + * Called after N_ALIVE notification received from "initialize" uCode. + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. + */ +static void +il3945_init_alive_start(struct il_priv *il) +{ + /* Check alive response for "valid" sign from uCode */ + if (il->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + D_INFO("Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (il3945_verify_ucode(il)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + D_INFO("Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Send pointers to protocol/runtime uCode image ... 
init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + D_INFO("Initialization Alive received.\n"); + if (il3945_set_ucode_ptrs(il)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + D_INFO("Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + +restart: + queue_work(il->workqueue, &il->restart); +} + +/** + * il3945_alive_start - called after N_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by il3945_init_alive_start()). + */ +static void +il3945_alive_start(struct il_priv *il) +{ + int thermal_spin = 0; + u32 rfkill; + struct il_rxon_context *ctx = &il->ctx; + + D_INFO("Runtime Alive received.\n"); + + if (il->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + D_INFO("Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. */ + if (il3945_verify_ucode(il)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + D_INFO("Bad runtime uCode load.\n"); + goto restart; + } + + rfkill = il_rd_prph(il, APMG_RFKILL_REG); + D_INFO("RFKILL status: 0x%x\n", rfkill); + + if (rfkill & 0x1) { + clear_bit(S_RF_KILL_HW, &il->status); + /* if RFKILL is not on, then wait for thermal + * sensor in adapter to kick in */ + while (il3945_hw_get_temperature(il) == 0) { + thermal_spin++; + udelay(10); + } + + if (thermal_spin) + D_INFO("Thermal calibration took %dus\n", + thermal_spin * 10); + } else + set_bit(S_RF_KILL_HW, &il->status); + + /* After the ALIVE response, we can send commands to 3945 uCode */ + set_bit(S_ALIVE, &il->status); + + /* Enable watchdog to monitor the driver tx queues */ + il_setup_watchdog(il); + + if (il_is_rfkill(il)) + return; + + ieee80211_wake_queues(il->hw); + + il->active_rate = RATES_MASK_3945; + + il_power_update_mode(il, true); + + if (il_is_associated(il)) { + struct il3945_rxon_cmd *active_rxon = + (struct il3945_rxon_cmd *)(&ctx->active); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + il_connection_init_rx_config(il, ctx); + } + + /* Configure Bluetooth device coexistence support */ + il_send_bt_config(il); + + set_bit(S_READY, &il->status); + + /* Configure the adapter for unassociated operation */ + il3945_commit_rxon(il, ctx); + + il3945_reg_txpower_periodic(il); + + D_INFO("ALIVE processing complete.\n"); + wake_up(&il->wait_command_queue); + + return; + +restart: + queue_work(il->workqueue, &il->restart); +} + +static void il3945_cancel_deferred_work(struct il_priv *il); + +static void +__il3945_down(struct il_priv *il) +{ + unsigned long flags; + int exit_pending; + + D_INFO(DRV_NAME " is going down\n"); + + il_scan_cancel_timeout(il, 200); + + exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); + + /* Stop TX queues watchdog. 
We need to have S_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&il->watchdog); + + /* Station information will now be cleared in device */ + il_clear_ucode_stations(il, NULL); + il_dealloc_bcast_stations(il); + il_clear_driver_stations(il); + + /* Unblock any waiting calls */ + wake_up_all(&il->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(S_EXIT_PENDING, &il->status); + + /* stop and reset the on-board processor */ + _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&il->lock, flags); + il_disable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + il3945_synchronize_irq(il); + + if (il->mac80211_registered) + ieee80211_stop_queues(il->hw); + + /* If we have not previously called il3945_init() then + * clear all bits but the RF Kill bits and return */ + if (!il_is_init(il)) { + il->status = + test_bit(S_RF_KILL_HW, + &il-> + status) << S_RF_KILL_HW | + test_bit(S_GEO_CONFIGURED, + &il-> + status) << S_GEO_CONFIGURED | + test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + il->status &= + test_bit(S_RF_KILL_HW, + &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED, + &il-> + status) << + S_GEO_CONFIGURED | test_bit(S_FW_ERROR, + &il-> + status) << S_FW_ERROR | + test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; + + il3945_hw_txq_ctx_stop(il); + il3945_hw_rxq_stop(il); + + /* Power-down device's busmaster DMA clocks */ + il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Stop the device, and put it in low power state */ + il_apm_stop(il); + +exit: + memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); + + if (il->beacon_skb) + dev_kfree_skb(il->beacon_skb); + il->beacon_skb = NULL; + + /* clear out any free frames */ + il3945_clear_free_frames(il); +} + +static void +il3945_down(struct il_priv *il) +{ + mutex_lock(&il->mutex); + __il3945_down(il); + mutex_unlock(&il->mutex); + + il3945_cancel_deferred_work(il); +} + +#define MAX_HW_RESTARTS 5 + +static int +il3945_alloc_bcast_station(struct il_priv *il) +{ + struct il_rxon_context *ctx = &il->ctx; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&il->sta_lock, flags); + sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL); + if (sta_id == IL_INVALID_STATION) { + IL_ERR("Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return -EINVAL; + } + + il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; + il->stations[sta_id].used |= IL_STA_BCAST; + spin_unlock_irqrestore(&il->sta_lock, flags); + + return 0; +} + +static int +__il3945_up(struct il_priv *il) +{ + int rc, i; + + rc = il3945_alloc_bcast_station(il); + if (rc) + return rc; + + if (test_bit(S_EXIT_PENDING, &il->status)) { + IL_WARN("Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { + IL_ERR("ucode not available for device bring up\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(S_RF_KILL_HW, &il->status); + else { + set_bit(S_RF_KILL_HW, &il->status); + IL_WARN("Radio disabled by HW RF Kill switch\n"); + return -ENODEV; + } + + _il_wr(il, CSR_INT, 
0xFFFFFFFF); + + rc = il3945_hw_nic_init(il); + if (rc) { + IL_ERR("Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + _il_wr(il, CSR_INT, 0xFFFFFFFF); + il_enable_interrupts(il); + + /* really make sure rfkill handshake bits are cleared */ + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. */ + memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, + il->ucode_data.len); + + /* We return success when we resume from suspend and rf_kill is on. */ + if (test_bit(S_RF_KILL_HW, &il->status)) + return 0; + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = il->cfg->ops->lib->load_ucode(il); + + if (rc) { + IL_ERR("Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + il3945_nic_start(il); + + D_INFO(DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(S_EXIT_PENDING, &il->status); + __il3945_down(il); + clear_bit(S_EXIT_PENDING, &il->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IL_ERR("Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void +il3945_bg_init_alive_start(struct work_struct *data) +{ + struct il_priv *il = + container_of(data, struct il_priv, init_alive_start.work); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) + goto out; + + il3945_init_alive_start(il); +out: + mutex_unlock(&il->mutex); +} + +static void +il3945_bg_alive_start(struct work_struct *data) +{ + struct il_priv *il = + container_of(data, struct il_priv, alive_start.work); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) + goto out; + + il3945_alive_start(il); +out: + mutex_unlock(&il->mutex); +} + +/* + * 3945 cannot interrupt driver when hardware rf kill switch toggles; + * driver must poll CSR_GP_CNTRL_REG register for change. This register + * *is* readable even when device has been SW_RESET into low power mode + * (e.g. during RF KILL). + */ +static void +il3945_rfkill_poll(struct work_struct *data) +{ + struct il_priv *il = + container_of(data, struct il_priv, _3945.rfkill_poll.work); + bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status); + bool new_rfkill = + !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + + if (new_rfkill != old_rfkill) { + if (new_rfkill) + set_bit(S_RF_KILL_HW, &il->status); + else + clear_bit(S_RF_KILL_HW, &il->status); + + wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill); + + D_RF_KILL("RF_KILL bit toggled to %s.\n", + new_rfkill ? "disable radio" : "enable radio"); + } + + /* Keep this running, even if radio now enabled. 
This will be + * cancelled in mac_start() if system decides to start again */ + queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, + round_jiffies_relative(2 * HZ)); + +} + +int +il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif) +{ + struct il_host_cmd cmd = { + .id = C_SCAN, + .len = sizeof(struct il3945_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct il3945_scan_cmd *scan; + u8 n_probes = 0; + enum ieee80211_band band; + bool is_active = false; + int ret; + u16 len; + + lockdep_assert_held(&il->mutex); + + if (!il->scan_cmd) { + il->scan_cmd = + kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE, + GFP_KERNEL); + if (!il->scan_cmd) { + D_SCAN("Fail to allocate scan memory\n"); + return -ENOMEM; + } + } + scan = il->scan_cmd; + memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; + scan->quiet_time = IL_ACTIVE_QUIET_TIME; + + if (il_is_associated(il)) { + u16 interval; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + D_INFO("Scanning while associated...\n"); + + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + /* + * suspend time format: + * 0-19: beacon interval in usec (time before exec.) + * 20-23: 0 + * 24-31: number of beacons (suspend between channels) + */ + + extra = (suspend_time / interval) << 24; + scan_suspend_time = + 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024)); + + scan->suspend_time = cpu_to_le32(scan_suspend_time); + D_SCAN("suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (il->scan_request->n_ssids) { + int i, p = 0; + D_SCAN("Kicking off active scan\n"); + for (i = 0; i < il->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!il->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + il->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + il->scan_request->ssids[i].ssid, + il->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + D_SCAN("Kicking off passive scan.\n"); + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = il->ctx.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + switch (il->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate = RATE_1M_PLCP; + band = IEEE80211_BAND_2GHZ; + break; + case IEEE80211_BAND_5GHZ: + scan->tx_cmd.rate = RATE_6M_PLCP; + band = IEEE80211_BAND_5GHZ; + break; + default: + IL_WARN("Invalid scan band\n"); + return -EIO; + } + + /* + * If active scaning is requested but a certain channel is marked + * passive, we can do active scanning if we detect transmissions. For + * passive only scanning disable switching to active on any channel. + */ + scan->good_CRC_th = + is_active ? 
IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER; + + len = + il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, + vif->addr, il->scan_request->ie, + il->scan_request->ie_len, + IL_MAX_SCAN_SIZE - sizeof(*scan)); + scan->tx_cmd.len = cpu_to_le16(len); + + /* select Rx antennas */ + scan->flags |= il3945_get_antenna_flags(il); + + scan->channel_count = + il3945_get_channels_for_scan(il, band, is_active, n_probes, + (void *)&scan->data[len], vif); + if (scan->channel_count == 0) { + D_SCAN("channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += + le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct il3945_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(S_SCAN_HW, &il->status); + ret = il_send_cmd_sync(il, &cmd); + if (ret) + clear_bit(S_SCAN_HW, &il->status); + return ret; +} + +void +il3945_post_scan(struct il_priv *il) +{ + struct il_rxon_context *ctx = &il->ctx; + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + il3945_commit_rxon(il, ctx); +} + +static void +il3945_bg_restart(struct work_struct *data) +{ + struct il_priv *il = container_of(data, struct il_priv, restart); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + if (test_and_clear_bit(S_FW_ERROR, &il->status)) { + mutex_lock(&il->mutex); + il->ctx.vif = NULL; + il->is_open = 0; + mutex_unlock(&il->mutex); + il3945_down(il); + ieee80211_restart_hw(il->hw); + } else { + il3945_down(il); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) { + mutex_unlock(&il->mutex); + return; + } + + __il3945_up(il); + mutex_unlock(&il->mutex); + } +} + +static void +il3945_bg_rx_replenish(struct work_struct *data) +{ + struct il_priv *il = container_of(data, struct il_priv, rx_replenish); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) + goto out; + + il3945_rx_replenish(il); +out: + mutex_unlock(&il->mutex); +} + +void +il3945_post_associate(struct il_priv *il) +{ + int rc = 0; + struct ieee80211_conf *conf = NULL; + struct il_rxon_context *ctx = &il->ctx; + + if (!ctx->vif || !il->is_open) + return; + + D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid, + ctx->active.bssid_addr); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + il_scan_cancel_timeout(il, 200); + + conf = &il->hw->conf; + + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + il3945_commit_rxon(il, ctx); + + rc = il_send_rxon_timing(il, ctx); + if (rc) + IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n"); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + + ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid); + + D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid, + ctx->vif->bss_conf.beacon_int); + + if (ctx->vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (ctx->vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + + il3945_commit_rxon(il, ctx); + + switch (ctx->vif->type) { + case NL80211_IFTYPE_STATION: + il3945_rate_scale_init(il->hw, IL_AP_ID); + break; + case NL80211_IFTYPE_ADHOC: + il3945_send_beacon_cmd(il); + break; + default: + IL_ERR("%s Should not be called in %d mode\n", __func__, + 
ctx->vif->type); + break; + } +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (2 * HZ) + +static int +il3945_mac_start(struct ieee80211_hw *hw) +{ + struct il_priv *il = hw->priv; + int ret; + + D_MAC80211("enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&il->mutex); + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. */ + + if (!il->ucode_code.len) { + ret = il3945_read_ucode(il); + if (ret) { + IL_ERR("Could not read microcode: %d\n", ret); + mutex_unlock(&il->mutex); + goto out_release_irq; + } + } + + ret = __il3945_up(il); + + mutex_unlock(&il->mutex); + + if (ret) + goto out_release_irq; + + D_INFO("Start UP work.\n"); + + /* Wait for START_ALIVE from ucode. Otherwise callbacks from + * mac80211 will not be run successfully. */ + ret = wait_event_timeout(il->wait_command_queue, + test_bit(S_READY, &il->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(S_READY, &il->status)) { + IL_ERR("Wait for START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + ret = -ETIMEDOUT; + goto out_release_irq; + } + } + + /* ucode is running and will send rfkill notifications, + * no need to poll the killswitch state anymore */ + cancel_delayed_work(&il->_3945.rfkill_poll); + + il->is_open = 1; + D_MAC80211("leave\n"); + return 0; + +out_release_irq: + il->is_open = 0; + D_MAC80211("leave - failed\n"); + return ret; +} + +static void +il3945_mac_stop(struct ieee80211_hw *hw) +{ + struct il_priv *il = hw->priv; + + D_MAC80211("enter\n"); + + if (!il->is_open) { + D_MAC80211("leave - skip\n"); + return; + } + + il->is_open = 0; + + il3945_down(il); + + flush_workqueue(il->workqueue); + + /* start polling the killswitch state again */ + queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, + round_jiffies_relative(2 * HZ)); + + D_MAC80211("leave\n"); +} + +static void +il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct il_priv *il = hw->priv; + + D_MAC80211("enter\n"); + + D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (il3945_tx_skb(il, skb)) + dev_kfree_skb_any(skb); + + D_MAC80211("leave\n"); +} + +void +il3945_config_ap(struct il_priv *il) +{ + struct il_rxon_context *ctx = &il->ctx; + struct ieee80211_vif *vif = ctx->vif; + int rc = 0; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + /* The following should be done only at AP bring up */ + if (!(il_is_associated(il))) { + + /* RXON - unassoc (to set timing command) */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + il3945_commit_rxon(il, ctx); + + /* RXON Timing */ + rc = il_send_rxon_timing(il, ctx); + if (rc) + IL_WARN("C_RXON_TIMING failed - " + "Attempting to continue.\n"); + + ctx->staging.assoc_id = 0; + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + il3945_commit_rxon(il, ctx); + } + 
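
The suspend-time packing in il3945_request_scan() above is easier to verify in isolation. A minimal standalone sketch of the same arithmetic, using hypothetical input values rather than anything read from the hardware or mac80211:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration of the scan suspend-time encoding used above:
 *   bits 24-31: whole beacon intervals to suspend between channels
 *   bits 20-23: zero (cleared by the 0xFF0FFFFF mask)
 *   bits  0-19: remainder converted to usec (1 TU = 1024 usec)
 */
static uint32_t pack_suspend_time(uint32_t suspend_time, uint32_t interval)
{
	uint32_t extra;

	if (!interval)
		interval = suspend_time;

	extra = (suspend_time / interval) << 24;
	return 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
}

int main(void)
{
	/* 100 TU suspend, 100 TU beacon interval: one beacon, no remainder */
	printf("0x%08X\n", pack_suspend_time(100, 100));	/* 0x01000000 */
	/* 150 TU suspend, 100 TU beacon interval: one beacon + 51200 usec */
	printf("0x%08X\n", pack_suspend_time(150, 100));	/* 0x0100C800 */
	return 0;
}
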
il3945_send_beacon_cmd(il); +} + +static int +il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct il_priv *il = hw->priv; + int ret = 0; + u8 sta_id = IL_INVALID_STATION; + u8 static_key; + + D_MAC80211("enter\n"); + + if (il3945_mod_params.sw_crypto) { + D_MAC80211("leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + /* + * To support IBSS RSN, don't program group keys in IBSS, the + * hardware will then not attempt to decrypt the frames. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + static_key = !il_is_associated(il); + + if (!static_key) { + sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta); + if (sta_id == IL_INVALID_STATION) + return -EINVAL; + } + + mutex_lock(&il->mutex); + il_scan_cancel_timeout(il, 100); + + switch (cmd) { + case SET_KEY: + if (static_key) + ret = il3945_set_static_key(il, key); + else + ret = il3945_set_dynamic_key(il, key, sta_id); + D_MAC80211("enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (static_key) + ret = il3945_remove_static_key(il); + else + ret = il3945_clear_sta_key_info(il, sta_id); + D_MAC80211("disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&il->mutex); + D_MAC80211("leave\n"); + + return ret; +} + +static int +il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct il_priv *il = hw->priv; + struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv; + int ret; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + u8 sta_id; + + D_INFO("received request to add station %pM\n", sta->addr); + mutex_lock(&il->mutex); + D_INFO("proceeding to add station %pM\n", sta->addr); + sta_priv->common.sta_id = IL_INVALID_STATION; + + ret = + il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id); + if (ret) { + IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); + /* Should we return success if return code is EEXIST ? */ + mutex_unlock(&il->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + D_INFO("Initializing rate scaling for station %pM\n", sta->addr); + il3945_rs_rate_init(il, sta, sta_id); + mutex_unlock(&il->mutex); + + return 0; +} + +static void +il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct il_priv *il = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct il_rxon_context *ctx = &il->ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags, + *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&il->mutex); + + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but even if hw is ready, committing here breaks for some reason, + * we'll eventually commit the filter flags change anyway. 
+ */ + + mutex_unlock(&il->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in il_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= + FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. + */ +static ssize_t +il3945_show_debug_level(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); +} + +static ssize_t +il3945_store_debug_level(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IL_INFO("%s is not in hex or decimal form.\n", buf); + else { + il->debug_level = val; + if (il_alloc_traffic_mem(il)) + IL_ERR("Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level, + il3945_store_debug_level); + +#endif /* CONFIG_IWLEGACY_DEBUG */ + +static ssize_t +il3945_show_temperature(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + + if (!il_is_alive(il)) + return -EAGAIN; + + return sprintf(buf, "%d\n", il3945_hw_get_temperature(il)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL); + +static ssize_t +il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + return sprintf(buf, "%d\n", il->tx_power_user_lmt); +} + +static ssize_t +il3945_store_tx_power(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + IL_INFO(": %s is not in decimal form.\n", buf); + else + il3945_hw_reg_set_txpower(il, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power, + il3945_store_tx_power); + +static ssize_t +il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + struct il_rxon_context *ctx = &il->ctx; + + return sprintf(buf, "0x%04X\n", ctx->active.flags); +} + +static ssize_t +il3945_store_flags(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + u32 flags = simple_strtoul(buf, NULL, 0); + struct il_rxon_context *ctx = &il->ctx; + + mutex_lock(&il->mutex); + if (le32_to_cpu(ctx->staging.flags) != flags) { + /* Cancel any currently running scans... 
*/ + if (il_scan_cancel_timeout(il, 100)) + IL_WARN("Could not cancel scan.\n"); + else { + D_INFO("Committing rxon.flags = 0x%04X\n", flags); + ctx->staging.flags = cpu_to_le32(flags); + il3945_commit_rxon(il, ctx); + } + } + mutex_unlock(&il->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags, + il3945_store_flags); + +static ssize_t +il3945_show_filter_flags(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + struct il_rxon_context *ctx = &il->ctx; + + return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags)); +} + +static ssize_t +il3945_store_filter_flags(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + struct il_rxon_context *ctx = &il->ctx; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&il->mutex); + if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { + /* Cancel any currently running scans... */ + if (il_scan_cancel_timeout(il, 100)) + IL_WARN("Could not cancel scan.\n"); + else { + D_INFO("Committing rxon.filter_flags = " "0x%04X\n", + filter_flags); + ctx->staging.filter_flags = cpu_to_le32(filter_flags); + il3945_commit_rxon(il, ctx); + } + } + mutex_unlock(&il->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags, + il3945_store_filter_flags); + +static ssize_t +il3945_show_measurement(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + struct il_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *) &measure_report; + unsigned long flags; + + spin_lock_irqsave(&il->lock, flags); + if (!(il->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&il->lock, flags); + return 0; + } + memcpy(&measure_report, &il->measure_report, size); + il->measurement_status = 0; + spin_unlock_irqrestore(&il->lock, flags); + + while (size && PAGE_SIZE - len) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t +il3945_store_measurement(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + struct il_rxon_context *ctx = &il->ctx; + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(ctx->active.channel), + .start_time = cpu_to_le64(il->_3945.last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p && *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n", + type, params.channel, buf); + il3945_get_measurement(il, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement, + il3945_store_measurement); + +static ssize_t +il3945_store_retry_rate(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + + il->retry_rate = simple_strtoul(buf, NULL, 0); + if (il->retry_rate 
<= 0) + il->retry_rate = 1; + + return count; +} + +static ssize_t +il3945_show_retry_rate(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + return sprintf(buf, "%d", il->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate, + il3945_store_retry_rate); + +static ssize_t +il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) +{ + /* all this shit doesn't belong into sysfs anyway */ + return 0; +} + +static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL); + +static ssize_t +il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + + if (!il_is_alive(il)) + return -EAGAIN; + + return sprintf(buf, "%d\n", il3945_mod_params.antenna); +} + +static ssize_t +il3945_store_antenna(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il __maybe_unused = dev_get_drvdata(d); + int ant; + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + D_INFO("not in hex or decimal form.\n"); + return count; + } + + if (ant >= 0 && ant <= 2) { + D_INFO("Setting antenna select to %d.\n", ant); + il3945_mod_params.antenna = (enum il3945_antenna)ant; + } else + D_INFO("Bad antenna select value %d.\n", ant); + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna, + il3945_store_antenna); + +static ssize_t +il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + if (!il_is_alive(il)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)il->status); +} + +static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL); + +static ssize_t +il3945_dump_error_log(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + char *p = (char *)buf; + + if (p[0] == '1') + il3945_dump_nic_error_log(il); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log); + +/***************************************************************************** + * + * driver setup and tear down + * + *****************************************************************************/ + +static void +il3945_setup_deferred_work(struct il_priv *il) +{ + il->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&il->wait_command_queue); + + INIT_WORK(&il->restart, il3945_bg_restart); + INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish); + INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start); + INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start); + INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll); + + il_setup_scan_deferred_work(il); + + il3945_hw_setup_deferred_work(il); + + init_timer(&il->watchdog); + il->watchdog.data = (unsigned long)il; + il->watchdog.function = il_bg_watchdog; + + tasklet_init(&il->irq_tasklet, + (void (*)(unsigned long))il3945_irq_tasklet, + (unsigned long)il); +} + +static void +il3945_cancel_deferred_work(struct il_priv *il) +{ + il3945_hw_cancel_deferred_work(il); + + cancel_delayed_work_sync(&il->init_alive_start); + cancel_delayed_work(&il->alive_start); + + il_cancel_scan_deferred_work(il); +} + +static struct attribute *il3945_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_flags.attr, + 
&dev_attr_filter_flags.attr, + &dev_attr_measurement.attr, + &dev_attr_retry_rate.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group il3945_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = il3945_sysfs_entries, +}; + +struct ieee80211_ops il3945_hw_ops = { + .tx = il3945_mac_tx, + .start = il3945_mac_start, + .stop = il3945_mac_stop, + .add_interface = il_mac_add_interface, + .remove_interface = il_mac_remove_interface, + .change_interface = il_mac_change_interface, + .config = il_mac_config, + .configure_filter = il3945_configure_filter, + .set_key = il3945_mac_set_key, + .conf_tx = il_mac_conf_tx, + .reset_tsf = il_mac_reset_tsf, + .bss_info_changed = il_mac_bss_info_changed, + .hw_scan = il_mac_hw_scan, + .sta_add = il3945_mac_sta_add, + .sta_remove = il_mac_sta_remove, + .tx_last_beacon = il_mac_tx_last_beacon, +}; + +static int +il3945_init_drv(struct il_priv *il) +{ + int ret; + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + + il->retry_rate = 1; + il->beacon_skb = NULL; + + spin_lock_init(&il->sta_lock); + spin_lock_init(&il->hcmd_lock); + + INIT_LIST_HEAD(&il->free_frames); + + mutex_init(&il->mutex); + + il->ieee_channels = NULL; + il->ieee_rates = NULL; + il->band = IEEE80211_BAND_2GHZ; + + il->iw_mode = NL80211_IFTYPE_STATION; + il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; + + if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { + IL_WARN("Unsupported EEPROM version: 0x%04X\n", + eeprom->version); + ret = -EINVAL; + goto err; + } + ret = il_init_channel_map(il); + if (ret) { + IL_ERR("initializing regulatory failed: %d\n", ret); + goto err; + } + + /* Set up txpower settings in driver for all channels */ + if (il3945_txpower_set_from_eeprom(il)) { + ret = -EIO; + goto err_free_channel_map; + } + + ret = il_init_geos(il); + if (ret) { + IL_ERR("initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + il3945_init_hw_rates(il, il->ieee_rates); + + return 0; + +err_free_channel_map: + il_free_channel_map(il); +err: + return ret; +} + +#define IL3945_MAX_PROBE_REQUEST 200 + +static int +il3945_setup_mac(struct il_priv *il) +{ + int ret; + struct ieee80211_hw *hw = il->hw; + + hw->rate_control_algorithm = "iwl-3945-rs"; + hw->sta_data_size = sizeof(struct il3945_sta_priv); + hw->vif_data_size = sizeof(struct il_vif_priv); + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT; + + hw->wiphy->interface_modes = il->ctx.interface_modes; + + hw->wiphy->flags |= + WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + if (il->bands[IEEE80211_BAND_2GHZ].n_channels) + il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &il->bands[IEEE80211_BAND_2GHZ]; + + if (il->bands[IEEE80211_BAND_5GHZ].n_channels) + il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &il->bands[IEEE80211_BAND_5GHZ]; + + il_leds_init(il); + + ret = ieee80211_register_hw(il->hw); + if (ret) { + IL_ERR("Failed to register hw (error %d)\n", ret); + return ret; + } + 
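
The RXON paths above (il3945_post_associate(), il3945_config_ap(), il3945_configure_filter(), and the flags/filter_flags sysfs stores) all follow the same staging/active split: callers edit ctx->staging, the change only reaches the device through il3945_commit_rxon(), and the staged values are then recorded as the active configuration; il3945_post_scan() relies on this by memcmp()-ing the two. A much-simplified sketch of that pattern, with stand-in types and a fake send step (the real commit path does far more work):

#include <stdio.h>
#include <string.h>

/* Stand-in for the RXON command body; the real structure is much larger. */
struct rxon_cfg {
	unsigned int channel;
	unsigned int flags;
	unsigned int filter_flags;
};

struct rxon_ctx {
	struct rxon_cfg staging;	/* edited by callers */
	struct rxon_cfg active;		/* last configuration sent to the device */
};

/* Fake "send to device" step standing in for the real host command. */
static void send_rxon(const struct rxon_cfg *cfg)
{
	printf("RXON: chan %u flags 0x%x filter 0x%x\n",
	       cfg->channel, cfg->flags, cfg->filter_flags);
}

static void commit_rxon(struct rxon_ctx *ctx)
{
	send_rxon(&ctx->staging);
	ctx->active = ctx->staging;	/* staging becomes the active config */
}

/* Mirrors il3945_post_scan(): commit only if something was deferred. */
static void commit_if_changed(struct rxon_ctx *ctx)
{
	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		commit_rxon(ctx);
}

int main(void)
{
	struct rxon_ctx ctx = { { 0 }, { 0 } };

	ctx.staging.channel = 6;
	ctx.staging.filter_flags |= 0x4;	/* e.g. an "associated"-style bit */
	commit_if_changed(&ctx);		/* sent once */
	commit_if_changed(&ctx);		/* no-op: staging == active */
	return 0;
}
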
il->mac80211_registered = 1; + + return 0; +} + +static int +il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct il_priv *il; + struct ieee80211_hw *hw; + struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); + struct il3945_eeprom *eeprom; + unsigned long flags; + + /*********************** + * 1. Allocating HW data + * ********************/ + + /* mac80211 allocates memory for this device instance, including + * space for this driver's ilate structure */ + hw = il_alloc_all(cfg); + if (hw == NULL) { + pr_err("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + il = hw->priv; + SET_IEEE80211_DEV(hw, &pdev->dev); + + il->cmd_queue = IL39_CMD_QUEUE_NUM; + + il->ctx.ctxid = 0; + + il->ctx.rxon_cmd = C_RXON; + il->ctx.rxon_timing_cmd = C_RXON_TIMING; + il->ctx.rxon_assoc_cmd = C_RXON_ASSOC; + il->ctx.qos_cmd = C_QOS_PARAM; + il->ctx.ap_sta_id = IL_AP_ID; + il->ctx.wep_key_cmd = C_WEPKEY; + il->ctx.interface_modes = + BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); + il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS; + il->ctx.station_devtype = RXON_DEV_TYPE_ESS; + il->ctx.unused_devtype = RXON_DEV_TYPE_ESS; + + /* + * Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. + */ + if (il3945_mod_params.disable_hw_scan) { + D_INFO("Disabling hw_scan\n"); + il3945_hw_ops.hw_scan = NULL; + } + + D_INFO("*** LOAD DRIVER ***\n"); + il->cfg = cfg; + il->pci_dev = pdev; + il->inta_mask = CSR_INI_SET_MASK; + + if (il_alloc_traffic_mem(il)) + IL_ERR("Not enough memory to generate traffic log\n"); + + /*************************** + * 2. Initializing PCI bus + * *************************/ + pci_disable_link_state(pdev, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + IL_WARN("No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, il); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + /*********************** + * 3. Read REV Register + * ********************/ + il->hw_base = pci_iomap(pdev, 0, 0); + if (!il->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + D_INFO("pci_resource_len = 0x%08llx\n", + (unsigned long long)pci_resource_len(pdev, 0)); + D_INFO("pci_resource_base = %p\n", il->hw_base); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&il->reg_lock); + spin_lock_init(&il->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /*********************** + * 4. 
Read EEPROM + * ********************/ + + /* Read the EEPROM */ + err = il_eeprom_init(il); + if (err) { + IL_ERR("Unable to init EEPROM\n"); + goto out_iounmap; + } + /* MAC Address location in EEPROM same for 3945/4965 */ + eeprom = (struct il3945_eeprom *)il->eeprom; + D_INFO("MAC address: %pM\n", eeprom->mac_address); + SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address); + + /*********************** + * 5. Setup HW Constants + * ********************/ + /* Device-specific setup */ + if (il3945_hw_set_hw_params(il)) { + IL_ERR("failed to set hw settings\n"); + goto out_eeprom_free; + } + + /*********************** + * 6. Setup il + * ********************/ + + err = il3945_init_drv(il); + if (err) { + IL_ERR("initializing driver failed\n"); + goto out_unset_hw_params; + } + + IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name); + + /*********************** + * 7. Setup Services + * ********************/ + + spin_lock_irqsave(&il->lock, flags); + il_disable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + + pci_enable_msi(il->pci_dev); + + err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); + if (err) { + IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); + goto out_disable_msi; + } + + err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group); + if (err) { + IL_ERR("failed to create sysfs device attributes\n"); + goto out_release_irq; + } + + il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5], + &il->ctx); + il3945_setup_deferred_work(il); + il3945_setup_handlers(il); + il_power_initialize(il); + + /********************************* + * 8. Setup and Register mac80211 + * *******************************/ + + il_enable_interrupts(il); + + err = il3945_setup_mac(il); + if (err) + goto out_remove_sysfs; + + err = il_dbgfs_register(il, DRV_NAME); + if (err) + IL_ERR("failed to create debugfs files. Ignoring error: %d\n", + err); + + /* Start monitoring the killswitch */ + queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); + + return 0; + +out_remove_sysfs: + destroy_workqueue(il->workqueue); + il->workqueue = NULL; + sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); +out_release_irq: + free_irq(il->pci_dev->irq, il); +out_disable_msi: + pci_disable_msi(il->pci_dev); + il_free_geos(il); + il_free_channel_map(il); +out_unset_hw_params: + il3945_unset_hw_params(il); +out_eeprom_free: + il_eeprom_free(il); +out_iounmap: + pci_iounmap(pdev, il->hw_base); +out_pci_release_regions: + pci_release_regions(pdev); +out_pci_disable_device: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); +out_ieee80211_free_hw: + il_free_traffic_mem(il); + ieee80211_free_hw(il->hw); +out: + return err; +} + +static void __devexit +il3945_pci_remove(struct pci_dev *pdev) +{ + struct il_priv *il = pci_get_drvdata(pdev); + unsigned long flags; + + if (!il) + return; + + D_INFO("*** UNLOAD DRIVER ***\n"); + + il_dbgfs_unregister(il); + + set_bit(S_EXIT_PENDING, &il->status); + + il_leds_exit(il); + + if (il->mac80211_registered) { + ieee80211_unregister_hw(il->hw); + il->mac80211_registered = 0; + } else { + il3945_down(il); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with il_down(), but there are paths to + * run il_down() without calling apm_ops.stop(), and there are + * paths to avoid running il_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. 
+ */ + il_apm_stop(il); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&il->lock, flags); + il_disable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + + il3945_synchronize_irq(il); + + sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); + + cancel_delayed_work_sync(&il->_3945.rfkill_poll); + + il3945_dealloc_ucode_pci(il); + + if (il->rxq.bd) + il3945_rx_queue_free(il, &il->rxq); + il3945_hw_txq_ctx_free(il); + + il3945_unset_hw_params(il); + + /*netif_stop_queue(dev); */ + flush_workqueue(il->workqueue); + + /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes + * il->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(il->workqueue); + il->workqueue = NULL; + il_free_traffic_mem(il); + + free_irq(pdev->irq, il); + pci_disable_msi(pdev); + + pci_iounmap(pdev, il->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + il_free_channel_map(il); + il_free_geos(il); + kfree(il->scan_cmd); + if (il->beacon_skb) + dev_kfree_skb(il->beacon_skb); + + ieee80211_free_hw(il->hw); +} + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver il3945_driver = { + .name = DRV_NAME, + .id_table = il3945_hw_card_ids, + .probe = il3945_pci_probe, + .remove = __devexit_p(il3945_pci_remove), + .driver.pm = IL_LEGACY_PM_OPS, +}; + +static int __init +il3945_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = il3945_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&il3945_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + il3945_rate_control_unregister(); + return ret; +} + +static void __exit +il3945_exit(void) +{ + pci_unregister_driver(&il3945_driver); + il3945_rate_control_unregister(); +} + +MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX)); + +module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])"); +module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int, + S_IRUGO); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); +#ifdef CONFIG_IWLEGACY_DEBUG +module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif +module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); + +module_exit(il3945_exit); +module_init(il3945_init); diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c new file mode 100644 index 00000000000..d7a83f22919 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/3945-rs.c @@ -0,0 +1,986 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <net/mac80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include "commands.h" +#include "3945.h" + +#define RS_NAME "iwl-3945-rs" + +static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 +}; + +static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 +}; + +static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = { + 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 +}; + +static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +struct il3945_tpt_entry { + s8 min_rssi; + u8 idx; +}; + +static struct il3945_tpt_entry il3945_tpt_table_a[] = { + {-60, RATE_54M_IDX}, + {-64, RATE_48M_IDX}, + {-72, RATE_36M_IDX}, + {-80, RATE_24M_IDX}, + {-84, RATE_18M_IDX}, + {-85, RATE_12M_IDX}, + {-87, RATE_9M_IDX}, + {-89, RATE_6M_IDX} +}; + +static struct il3945_tpt_entry il3945_tpt_table_g[] = { + {-60, RATE_54M_IDX}, + {-64, RATE_48M_IDX}, + {-68, RATE_36M_IDX}, + {-80, RATE_24M_IDX}, + {-84, RATE_18M_IDX}, + {-85, RATE_12M_IDX}, + {-86, RATE_11M_IDX}, + {-88, RATE_5M_IDX}, + {-90, RATE_2M_IDX}, + {-92, RATE_1M_IDX} +}; + +#define RATE_MAX_WINDOW 62 +#define RATE_FLUSH (3*HZ) +#define RATE_WIN_FLUSH (HZ/2) +#define IL39_RATE_HIGH_TH 11520 +#define IL_SUCCESS_UP_TH 8960 +#define IL_SUCCESS_DOWN_TH 10880 +#define RATE_MIN_FAILURE_TH 6 +#define RATE_MIN_SUCCESS_TH 8 +#define RATE_DECREASE_TH 1920 +#define RATE_RETRY_TH 15 + +static u8 +il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band) +{ + u32 idx = 0; + u32 table_size = 0; + struct il3945_tpt_entry *tpt_table = NULL; + + if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL) + rssi = IL_MIN_RSSI_VAL; + + switch (band) { + case IEEE80211_BAND_2GHZ: + tpt_table = il3945_tpt_table_g; + table_size = ARRAY_SIZE(il3945_tpt_table_g); + break; + case IEEE80211_BAND_5GHZ: + tpt_table = il3945_tpt_table_a; + table_size = ARRAY_SIZE(il3945_tpt_table_a); + break; + default: + BUG(); + break; + } + + while (idx < table_size && rssi < tpt_table[idx].min_rssi) + idx++; + + idx = min(idx, table_size - 1); + + return tpt_table[idx].idx; +} + +static void +il3945_clear_win(struct il3945_rate_scale_data *win) +{ + win->data = 0; + win->success_counter = 0; + win->success_ratio = -1; + win->counter = 0; + win->average_tpt = 
IL_INVALID_VALUE; + win->stamp = 0; +} + +/** + * il3945_rate_scale_flush_wins - flush out the rate scale wins + * + * Returns the number of wins that have gathered data but were + * not flushed. If there were any that were not flushed, then + * reschedule the rate flushing routine. + */ +static int +il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta) +{ + int unflushed = 0; + int i; + unsigned long flags; + struct il_priv *il __maybe_unused = rs_sta->il; + + /* + * For each rate, if we have collected data on that rate + * and it has been more than RATE_WIN_FLUSH + * since we flushed, clear out the gathered stats + */ + for (i = 0; i < RATE_COUNT_3945; i++) { + if (!rs_sta->win[i].counter) + continue; + + spin_lock_irqsave(&rs_sta->lock, flags); + if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) { + D_RATE("flushing %d samples of rate " "idx %d\n", + rs_sta->win[i].counter, i); + il3945_clear_win(&rs_sta->win[i]); + } else + unflushed++; + spin_unlock_irqrestore(&rs_sta->lock, flags); + } + + return unflushed; +} + +#define RATE_FLUSH_MAX 5000 /* msec */ +#define RATE_FLUSH_MIN 50 /* msec */ +#define IL_AVERAGE_PACKETS 1500 + +static void +il3945_bg_rate_scale_flush(unsigned long data) +{ + struct il3945_rs_sta *rs_sta = (void *)data; + struct il_priv *il __maybe_unused = rs_sta->il; + int unflushed = 0; + unsigned long flags; + u32 packet_count, duration, pps; + + D_RATE("enter\n"); + + unflushed = il3945_rate_scale_flush_wins(rs_sta); + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* Number of packets Rx'd since last time this timer ran */ + packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; + + rs_sta->last_tx_packets = rs_sta->tx_packets + 1; + + if (unflushed) { + duration = + jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); + + D_RATE("Tx'd %d packets in %dms\n", packet_count, duration); + + /* Determine packets per second */ + if (duration) + pps = (packet_count * 1000) / duration; + else + pps = 0; + + if (pps) { + duration = (IL_AVERAGE_PACKETS * 1000) / pps; + if (duration < RATE_FLUSH_MIN) + duration = RATE_FLUSH_MIN; + else if (duration > RATE_FLUSH_MAX) + duration = RATE_FLUSH_MAX; + } else + duration = RATE_FLUSH_MAX; + + rs_sta->flush_time = msecs_to_jiffies(duration); + + D_RATE("new flush period: %d msec ave %d\n", duration, + packet_count); + + mod_timer(&rs_sta->rate_scale_flush, + jiffies + rs_sta->flush_time); + + rs_sta->last_partial_flush = jiffies; + } else { + rs_sta->flush_time = RATE_FLUSH; + rs_sta->flush_pending = 0; + } + /* If there weren't any unflushed entries, we don't schedule the timer + * to run again */ + + rs_sta->last_flush = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + D_RATE("leave\n"); +} + +/** + * il3945_collect_tx_data - Update the success/failure sliding win + * + * We keep a sliding win of the last 64 packets transmitted + * at this rate. win->data contains the bitmask of successful + * packets. + */ +static void +il3945_collect_tx_data(struct il3945_rs_sta *rs_sta, + struct il3945_rate_scale_data *win, int success, + int retries, int idx) +{ + unsigned long flags; + s32 fail_count; + struct il_priv *il __maybe_unused = rs_sta->il; + + if (!retries) { + D_RATE("leave: retries == 0 -- should be at least 1\n"); + return; + } + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history win; anything older isn't really relevant any more. 
+ * If we have filled up the sliding win, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + * */ + while (retries > 0) { + if (win->counter >= RATE_MAX_WINDOW) { + + /* remove earliest */ + win->counter = RATE_MAX_WINDOW - 1; + + if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) { + win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1)); + win->success_counter--; + } + } + + /* Increment frames-attempted counter */ + win->counter++; + + /* Shift bitmap by one frame (throw away oldest history), + * OR in "1", and increment "success" if this + * frame was successful. */ + win->data <<= 1; + if (success > 0) { + win->success_counter++; + win->data |= 0x1; + success--; + } + + retries--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (win->counter > 0) + win->success_ratio = + 128 * (100 * win->success_counter) / win->counter; + else + win->success_ratio = IL_INVALID_VALUE; + + fail_count = win->counter - win->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if (fail_count >= RATE_MIN_FAILURE_TH || + win->success_counter >= RATE_MIN_SUCCESS_TH) + win->average_tpt = + ((win->success_ratio * rs_sta->expected_tpt[idx] + + 64) / 128); + else + win->average_tpt = IL_INVALID_VALUE; + + /* Tag this win as having been updated */ + win->stamp = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void +il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) +{ + struct ieee80211_hw *hw = il->hw; + struct ieee80211_conf *conf = &il->hw->conf; + struct il3945_sta_priv *psta; + struct il3945_rs_sta *rs_sta; + struct ieee80211_supported_band *sband; + int i; + + D_INFO("enter\n"); + if (sta_id == il->ctx.bcast_sta_id) + goto out; + + psta = (struct il3945_sta_priv *)sta->drv_priv; + rs_sta = &psta->rs_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + rs_sta->il = il; + + rs_sta->start_rate = RATE_INVALID; + + /* default to just 802.11b */ + rs_sta->expected_tpt = il3945_expected_tpt_b; + + rs_sta->last_partial_flush = jiffies; + rs_sta->last_flush = jiffies; + rs_sta->flush_time = RATE_FLUSH; + rs_sta->last_tx_packets = 0; + + rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; + rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush; + + for (i = 0; i < RATE_COUNT_3945; i++) + il3945_clear_win(&rs_sta->win[i]); + + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
*/ + + for (i = sband->n_bitrates - 1; i >= 0; i--) { + if (sta->supp_rates[sband->band] & (1 << i)) { + rs_sta->last_txrate_idx = i; + break; + } + } + + il->_3945.sta_supp_rates = sta->supp_rates[sband->band]; + /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */ + if (sband->band == IEEE80211_BAND_5GHZ) { + rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; + il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE; + } + +out: + il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; + + D_INFO("leave\n"); +} + +static void * +il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} + +/* rate scale requires free function to be implemented */ +static void +il3945_rs_free(void *il) +{ +} + +static void * +il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct il3945_rs_sta *rs_sta; + struct il3945_sta_priv *psta = (void *)sta->drv_priv; + struct il_priv *il __maybe_unused = il_priv; + + D_RATE("enter\n"); + + rs_sta = &psta->rs_sta; + + spin_lock_init(&rs_sta->lock); + init_timer(&rs_sta->rate_scale_flush); + + D_RATE("leave\n"); + + return rs_sta; +} + +static void +il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta) +{ + struct il3945_rs_sta *rs_sta = il_sta; + + /* + * Be careful not to use any members of il3945_rs_sta (like trying + * to use il_priv to print out debugging) since it may not be fully + * initialized at this point. + */ + del_timer_sync(&rs_sta->rate_scale_flush); +} + +/** + * il3945_rs_tx_status - Update rate control values based on Tx results + * + * NOTE: Uses il_priv->retry_rate for the # of retries attempted by + * the hardware for each rate. + */ +static void +il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *il_sta, + struct sk_buff *skb) +{ + s8 retries = 0, current_count; + int scale_rate_idx, first_idx, last_idx; + unsigned long flags; + struct il_priv *il = (struct il_priv *)il_rate; + struct il3945_rs_sta *rs_sta = il_sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + D_RATE("enter\n"); + + retries = info->status.rates[0].count; + /* Sanity Check for retries */ + if (retries > RATE_RETRY_TH) + retries = RATE_RETRY_TH; + + first_idx = sband->bitrates[info->status.rates[0].idx].hw_value; + if (first_idx < 0 || first_idx >= RATE_COUNT_3945) { + D_RATE("leave: Rate out of bounds: %d\n", first_idx); + return; + } + + if (!il_sta) { + D_RATE("leave: No STA il data to update!\n"); + return; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!rs_sta->il) { + D_RATE("leave: STA il data uninitialized!\n"); + return; + } + + rs_sta->tx_packets++; + + scale_rate_idx = first_idx; + last_idx = first_idx; + + /* + * Update the win for each rate. We determine which rates + * were Tx'd based on the total number of retries vs. the number + * of retries configured for each rate -- currently set to the + * il value 'retry_rate' vs. 
rate specific + * + * On exit from this while loop last_idx indicates the rate + * at which the frame was finally transmitted (or failed if no + * ACK) + */ + while (retries > 1) { + if ((retries - 1) < il->retry_rate) { + current_count = (retries - 1); + last_idx = scale_rate_idx; + } else { + current_count = il->retry_rate; + last_idx = il3945_rs_next_rate(il, scale_rate_idx); + } + + /* Update this rate accounting for as many retries + * as was used for it (per current_count) */ + il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0, + current_count, scale_rate_idx); + D_RATE("Update rate %d for %d retries.\n", scale_rate_idx, + current_count); + + retries -= current_count; + + scale_rate_idx = last_idx; + } + + /* Update the last idx win with success/failure based on ACK */ + D_RATE("Update rate %d with %s.\n", last_idx, + (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure"); + il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx], + info->flags & IEEE80211_TX_STAT_ACK, 1, + last_idx); + + /* We updated the rate scale win -- if its been more than + * flush_time since the last run, schedule the flush + * again */ + spin_lock_irqsave(&rs_sta->lock, flags); + + if (!rs_sta->flush_pending && + time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) { + + rs_sta->last_partial_flush = jiffies; + rs_sta->flush_pending = 1; + mod_timer(&rs_sta->rate_scale_flush, + jiffies + rs_sta->flush_time); + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + D_RATE("leave\n"); +} + +static u16 +il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask, + enum ieee80211_band band) +{ + u8 high = RATE_INVALID; + u8 low = RATE_INVALID; + struct il_priv *il __maybe_unused = rs_sta->il; + + /* 802.11A walks to the next literal adjacent rate in + * the rate table */ + if (unlikely(band == IEEE80211_BAND_5GHZ)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = idx - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = idx + 1; + for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = idx; + while (low != RATE_INVALID) { + if (rs_sta->tgg) + low = il3945_rates[low].prev_rs_tgg; + else + low = il3945_rates[low].prev_rs; + if (low == RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + D_RATE("Skipping masked lower rate: %d\n", low); + } + + high = idx; + while (high != RATE_INVALID) { + if (rs_sta->tgg) + high = il3945_rates[high].next_rs_tgg; + else + high = il3945_rates[high].next_rs; + if (high == RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + D_RATE("Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +/** + * il3945_rs_get_rate - find the rate for the requested packet + * + * Returns the ieee80211_rate structure allocated by the driver. + * + * The rate control algorithm has no internal mapping between hw_mode's + * rate ordering and the rate ordering used by the rate control algorithm. + * + * The rate control algorithm uses a single table of rates that goes across + * the entire A/B/G spectrum vs. being limited to just one particular + * hw_mode. 
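+ * (In that single table, idx 0-3 are the CCK rates 1/2/5.5/11 Mbit and
+ * idx 4-11 the OFDM rates 6-54 Mbit, mirroring il3945_rates[]. Note also
+ * that the success_ratio values consulted here are scaled by 128: e.g.
+ * 36 ACKed frames out of 48 attempts gives 128 * (100 * 36) / 48 = 9600,
+ * i.e. 75% expressed in the same units the RATE_*_TH thresholds use.)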
+ * + * As such, we can't convert the idx obtained below into the hw_mode's + * rate table and must reference the driver allocated rate table + * + */ +static void +il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct ieee80211_supported_band *sband = txrc->sband; + struct sk_buff *skb = txrc->skb; + u8 low = RATE_INVALID; + u8 high = RATE_INVALID; + u16 high_low; + int idx; + struct il3945_rs_sta *rs_sta = il_sta; + struct il3945_rate_scale_data *win = NULL; + int current_tpt = IL_INVALID_VALUE; + int low_tpt = IL_INVALID_VALUE; + int high_tpt = IL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + unsigned long flags; + u16 rate_mask; + s8 max_rate_idx = -1; + struct il_priv *il __maybe_unused = (struct il_priv *)il_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + D_RATE("enter\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (rs_sta && !rs_sta->il) { + D_RATE("Rate scaling information not initialized yet.\n"); + il_sta = NULL; + } + + if (rate_control_send_low(sta, il_sta, txrc)) + return; + + rate_mask = sta->supp_rates[sband->band]; + + /* get user max rate if set */ + max_rate_idx = txrc->max_rate_idx; + if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1) + max_rate_idx += IL_FIRST_OFDM_RATE; + if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT) + max_rate_idx = -1; + + idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1); + + if (sband->band == IEEE80211_BAND_5GHZ) + rate_mask = rate_mask << IL_FIRST_OFDM_RATE; + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* for recent assoc, choose best rate regarding + * to rssi value + */ + if (rs_sta->start_rate != RATE_INVALID) { + if (rs_sta->start_rate < idx && + (rate_mask & (1 << rs_sta->start_rate))) + idx = rs_sta->start_rate; + rs_sta->start_rate = RATE_INVALID; + } + + /* force user max rate if set by user */ + if (max_rate_idx != -1 && max_rate_idx < idx) { + if (rate_mask & (1 << max_rate_idx)) + idx = max_rate_idx; + } + + win = &(rs_sta->win[idx]); + + fail_count = win->counter - win->success_counter; + + if (fail_count < RATE_MIN_FAILURE_TH && + win->success_counter < RATE_MIN_SUCCESS_TH) { + spin_unlock_irqrestore(&rs_sta->lock, flags); + + D_RATE("Invalid average_tpt on rate %d: " + "counter: %d, success_counter: %d, " + "expected_tpt is %sNULL\n", idx, win->counter, + win->success_counter, + rs_sta->expected_tpt ? 
"not " : ""); + + /* Can't calculate this yet; not enough history */ + win->average_tpt = IL_INVALID_VALUE; + goto out; + + } + + current_tpt = win->average_tpt; + + high_low = + il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if (max_rate_idx != -1 && max_rate_idx < high) + high = RATE_INVALID; + + /* Collect Measured throughputs of adjacent rates */ + if (low != RATE_INVALID) + low_tpt = rs_sta->win[low].average_tpt; + + if (high != RATE_INVALID) + high_tpt = rs_sta->win[high].average_tpt; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + scale_action = 0; + + /* Low success ratio , need to drop the rate */ + if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) { + D_RATE("decrease rate because of low success_ratio\n"); + scale_action = -1; + /* No throughput measured yet for adjacent rates, + * try increase */ + } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) { + + if (high != RATE_INVALID && + win->success_ratio >= RATE_INCREASE_TH) + scale_action = 1; + else if (low != RATE_INVALID) + scale_action = 0; + + /* Both adjacent throughputs are measured, but neither one has + * better throughput; we're using the best rate, don't change + * it! */ + } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE + && low_tpt < current_tpt && high_tpt < current_tpt) { + + D_RATE("No action -- low [%d] & high [%d] < " + "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt); + scale_action = 0; + + /* At least one of the rates has better throughput */ + } else { + if (high_tpt != IL_INVALID_VALUE) { + + /* High rate has better throughput, Increase + * rate */ + if (high_tpt > current_tpt && + win->success_ratio >= RATE_INCREASE_TH) + scale_action = 1; + else { + D_RATE("decrease rate because of high tpt\n"); + scale_action = 0; + } + } else if (low_tpt != IL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + D_RATE("decrease rate because of low tpt\n"); + scale_action = -1; + } else if (win->success_ratio >= RATE_INCREASE_TH) { + /* Lower rate has better + * throughput,decrease rate */ + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. 
*/ + if (scale_action == -1 && low != RATE_INVALID && + (win->success_ratio > RATE_HIGH_TH || + current_tpt > 100 * rs_sta->expected_tpt[low])) + scale_action = 0; + + switch (scale_action) { + case -1: + /* Decrese rate */ + if (low != RATE_INVALID) + idx = low; + break; + case 1: + /* Increase rate */ + if (high != RATE_INVALID) + idx = high; + + break; + case 0: + default: + /* No change */ + break; + } + + D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action, + low, high); + +out: + + if (sband->band == IEEE80211_BAND_5GHZ) { + if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE)) + idx = IL_FIRST_OFDM_RATE; + rs_sta->last_txrate_idx = idx; + info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE; + } else { + rs_sta->last_txrate_idx = idx; + info->control.rates[0].idx = rs_sta->last_txrate_idx; + } + + D_RATE("leave: %d\n", idx); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static int +il3945_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int j; + ssize_t ret; + struct il3945_rs_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += + sprintf(buff + desc, + "tx packets=%d last rate idx=%d\n" + "rate=0x%X flush time %d\n", lq_sta->tx_packets, + lq_sta->last_txrate_idx, lq_sta->start_rate, + jiffies_to_msecs(lq_sta->flush_time)); + for (j = 0; j < RATE_COUNT_3945; j++) { + desc += + sprintf(buff + desc, "counter=%d success=%d %%=%d\n", + lq_sta->win[j].counter, + lq_sta->win[j].success_counter, + lq_sta->win[j].success_ratio); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = il3945_sta_dbgfs_stats_table_read, + .open = il3945_open_file_generic, + .llseek = default_llseek, +}; + +static void +il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir) +{ + struct il3945_rs_sta *lq_sta = il_sta; + + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", 0600, dir, lq_sta, + &rs_sta_dbgfs_stats_table_ops); + +} + +static void +il3945_remove_debugfs(void *il, void *il_sta) +{ + struct il3945_rs_sta *lq_sta = il_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
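+ * The real per-station setup is done in il3945_rs_rate_init(), which the
+ * driver calls itself once the station has actually been added.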
+ */ +static void +il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *il_sta) +{ +} + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = il3945_rs_tx_status, + .get_rate = il3945_rs_get_rate, + .rate_init = il3945_rs_rate_init_stub, + .alloc = il3945_rs_alloc, + .free = il3945_rs_free, + .alloc_sta = il3945_rs_alloc_sta, + .free_sta = il3945_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = il3945_add_debugfs, + .remove_sta_debugfs = il3945_remove_debugfs, +#endif + +}; + +void +il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct il_priv *il = hw->priv; + s32 rssi = 0; + unsigned long flags; + struct il3945_rs_sta *rs_sta; + struct ieee80211_sta *sta; + struct il3945_sta_priv *psta; + + D_RATE("enter\n"); + + rcu_read_lock(); + + sta = + ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr); + if (!sta) { + D_RATE("Unable to find station to initialize rate scaling.\n"); + rcu_read_unlock(); + return; + } + + psta = (void *)sta->drv_priv; + rs_sta = &psta->rs_sta; + + spin_lock_irqsave(&rs_sta->lock, flags); + + rs_sta->tgg = 0; + switch (il->band) { + case IEEE80211_BAND_2GHZ: + /* TODO: this always does G, not a regression */ + if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) { + rs_sta->tgg = 1; + rs_sta->expected_tpt = il3945_expected_tpt_g_prot; + } else + rs_sta->expected_tpt = il3945_expected_tpt_g; + break; + case IEEE80211_BAND_5GHZ: + rs_sta->expected_tpt = il3945_expected_tpt_a; + break; + case IEEE80211_NUM_BANDS: + BUG(); + break; + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + rssi = il->_3945.last_rx_rssi; + if (rssi == 0) + rssi = IL_MIN_RSSI_VAL; + + D_RATE("Network RSSI: %d\n", rssi); + + rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band); + + D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi, + rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp); + rcu_read_unlock(); +} + +int +il3945_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_ops); +} + +void +il3945_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_ops); +} diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c new file mode 100644 index 00000000000..1489b1573a6 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/3945.c @@ -0,0 +1,2743 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> +#include <net/mac80211.h> + +#include "common.h" +#include "3945.h" + +/* Send led command */ +static int +il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd) +{ + struct il_host_cmd cmd = { + .id = C_LEDS, + .len = sizeof(struct il_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + + return il_send_cmd(il, &cmd); +} + +const struct il_led_ops il3945_led_ops = { + .cmd = il3945_send_led_cmd, +}; + +#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ + [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \ + RATE_##r##M_IEEE, \ + RATE_##ip##M_IDX, \ + RATE_##in##M_IDX, \ + RATE_##rp##M_IDX, \ + RATE_##rn##M_IDX, \ + RATE_##pp##M_IDX, \ + RATE_##np##M_IDX, \ + RATE_##r##M_IDX_TBL, \ + RATE_##ip##M_IDX_TBL } + +/* + * Parameter order: + * rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to RATE_INVALID + * + */ +const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = { + IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ + IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ + IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */ +}; + +static inline u8 +il3945_get_prev_ieee_rate(u8 rate_idx) +{ + u8 rate = il3945_rates[rate_idx].prev_ieee; + + if (rate == RATE_INVALID) + rate = rate_idx; + return rate; +} + +/* 1 = enable the il3945_disable_events() function */ +#define IL_EVT_DISABLE (0) +#define IL_EVT_DISABLE_SIZE (1532/32) + +/** + * il3945_disable_events - Disable selected events in uCode event log + * + * Disable an event by writing "1"s into "disable" + * bitmap in SRAM. Bit position corresponds to Event # (id/type). + * Default values of 0 enable uCode events to be logged. + * Use for only special debugging. This function is just a placeholder as-is, + * you'll need to provide the special bits! ... + * ... and set IL_EVT_DISABLE to 1. 
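+ * For example, evt_disable[0] = 0x00000020 would suppress logging of
+ * event id 5, since the first u32 covers ids 31 - 0 and the bit position
+ * equals the event id.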
*/ +void +il3945_disable_events(struct il_priv *il) +{ + int i; + u32 base; /* SRAM address of event log header */ + u32 disable_ptr; /* SRAM address of event-disable bitmap array */ + u32 array_size; /* # of u32 entries in array */ + static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = { + 0x00000000, /* 31 - 0 Event id numbers */ + 0x00000000, /* 63 - 32 */ + 0x00000000, /* 95 - 64 */ + 0x00000000, /* 127 - 96 */ + 0x00000000, /* 159 - 128 */ + 0x00000000, /* 191 - 160 */ + 0x00000000, /* 223 - 192 */ + 0x00000000, /* 255 - 224 */ + 0x00000000, /* 287 - 256 */ + 0x00000000, /* 319 - 288 */ + 0x00000000, /* 351 - 320 */ + 0x00000000, /* 383 - 352 */ + 0x00000000, /* 415 - 384 */ + 0x00000000, /* 447 - 416 */ + 0x00000000, /* 479 - 448 */ + 0x00000000, /* 511 - 480 */ + 0x00000000, /* 543 - 512 */ + 0x00000000, /* 575 - 544 */ + 0x00000000, /* 607 - 576 */ + 0x00000000, /* 639 - 608 */ + 0x00000000, /* 671 - 640 */ + 0x00000000, /* 703 - 672 */ + 0x00000000, /* 735 - 704 */ + 0x00000000, /* 767 - 736 */ + 0x00000000, /* 799 - 768 */ + 0x00000000, /* 831 - 800 */ + 0x00000000, /* 863 - 832 */ + 0x00000000, /* 895 - 864 */ + 0x00000000, /* 927 - 896 */ + 0x00000000, /* 959 - 928 */ + 0x00000000, /* 991 - 960 */ + 0x00000000, /* 1023 - 992 */ + 0x00000000, /* 1055 - 1024 */ + 0x00000000, /* 1087 - 1056 */ + 0x00000000, /* 1119 - 1088 */ + 0x00000000, /* 1151 - 1120 */ + 0x00000000, /* 1183 - 1152 */ + 0x00000000, /* 1215 - 1184 */ + 0x00000000, /* 1247 - 1216 */ + 0x00000000, /* 1279 - 1248 */ + 0x00000000, /* 1311 - 1280 */ + 0x00000000, /* 1343 - 1312 */ + 0x00000000, /* 1375 - 1344 */ + 0x00000000, /* 1407 - 1376 */ + 0x00000000, /* 1439 - 1408 */ + 0x00000000, /* 1471 - 1440 */ + 0x00000000, /* 1503 - 1472 */ + }; + + base = le32_to_cpu(il->card_alive.log_event_table_ptr); + if (!il3945_hw_valid_rtc_data_addr(base)) { + IL_ERR("Invalid event log pointer 0x%08X\n", base); + return; + } + + disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32))); + array_size = il_read_targ_mem(il, base + (5 * sizeof(u32))); + + if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) { + D_INFO("Disabling selected uCode log events at 0x%x\n", + disable_ptr); + for (i = 0; i < IL_EVT_DISABLE_SIZE; i++) + il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)), + evt_disable[i]); + + } else { + D_INFO("Selected uCode log events may be disabled\n"); + D_INFO(" by writing \"1\"s into disable bitmap\n"); + D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr, + array_size); + } + +} + +static int +il3945_hwrate_to_plcp_idx(u8 plcp) +{ + int idx; + + for (idx = 0; idx < RATE_COUNT_3945; idx++) + if (il3945_rates[idx].plcp == plcp) + return idx; + return -1; +} + +#ifdef CONFIG_IWLEGACY_DEBUG +#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x + +static const char * +il3945_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_3945_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} +#else +static inline const char * 
+il3945_get_tx_fail_reason(u32 status) +{ + return ""; +} +#endif + +/* + * get ieee prev rate from rate scale table. + * for A and B mode we need to overright prev + * value + */ +int +il3945_rs_next_rate(struct il_priv *il, int rate) +{ + int next_rate = il3945_get_prev_ieee_rate(rate); + + switch (il->band) { + case IEEE80211_BAND_5GHZ: + if (rate == RATE_12M_IDX) + next_rate = RATE_9M_IDX; + else if (rate == RATE_6M_IDX) + next_rate = RATE_6M_IDX; + break; + case IEEE80211_BAND_2GHZ: + if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) && + il_is_associated(il)) { + if (rate == RATE_11M_IDX) + next_rate = RATE_5M_IDX; + } + break; + + default: + break; + } + + return next_rate; +} + +/** + * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd + * + * When FW advances 'R' idx, all entries between old and new 'R' idx + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. + */ +static void +il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx) +{ + struct il_tx_queue *txq = &il->txq[txq_id]; + struct il_queue *q = &txq->q; + struct il_tx_info *tx_info; + + BUG_ON(txq_id == IL39_CMD_QUEUE_NUM); + + for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + ieee80211_tx_status_irqsafe(il->hw, tx_info->skb); + tx_info->skb = NULL; + il->cfg->ops->lib->txq_free_tfd(il, txq); + } + + if (il_queue_space(q) > q->low_mark && txq_id >= 0 && + txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered) + il_wake_queue(il, txq); +} + +/** + * il3945_hdl_tx - Handle Tx response + */ +static void +il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int idx = SEQ_TO_IDX(sequence); + struct il_tx_queue *txq = &il->txq[txq_id]; + struct ieee80211_tx_info *info; + struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); + int rate_idx; + int fail; + + if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { + IL_ERR("Read idx for DMA queue txq_id (%d) idx %d " + "is out of range [0-%d] %d %d\n", txq_id, idx, + txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); + return; + } + + txq->time_stamp = jiffies; + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); + ieee80211_tx_info_clear_status(info); + + /* Fill the MRR chain with some info about on-chip retransmissions */ + rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate); + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx -= IL_FIRST_OFDM_RATE; + + fail = tx_resp->failure_frame; + + info->status.rates[0].idx = rate_idx; + info->status.rates[0].count = fail + 1; /* add final attempt */ + + /* tx_status->rts_retry_count = tx_resp->failure_rts; */ + info->flags |= + ((status & TX_STATUS_MSK) == + TX_STATUS_SUCCESS) ? 
IEEE80211_TX_STAT_ACK : 0; + + D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id, + il3945_get_tx_fail_reason(status), status, tx_resp->rate, + tx_resp->failure_frame); + + D_TX_REPLY("Tx queue reclaim %d\n", idx); + il3945_tx_queue_reclaim(il, txq_id, idx); + + if (status & TX_ABORT_REQUIRED_MSK) + IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + +/***************************************************************************** + * + * Intel PRO/Wireless 3945ABG/BG Network Connection + * + * RX handler implementations + * + *****************************************************************************/ +#ifdef CONFIG_IWLEGACY_DEBUGFS +static void +il3945_accumulative_stats(struct il_priv *il, __le32 * stats) +{ + int i; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + + prev_stats = (__le32 *) &il->_3945.stats; + accum_stats = (u32 *) &il->_3945.accum_stats; + delta = (u32 *) &il->_3945.delta_stats; + max_delta = (u32 *) &il->_3945.max_delta; + + for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats); + i += + sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, + accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = + (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative stats for "no-counter" type stats */ + il->_3945.accum_stats.general.temperature = + il->_3945.stats.general.temperature; + il->_3945.accum_stats.general.ttl_timestamp = + il->_3945.stats.general.ttl_timestamp; +} +#endif + +void +il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + + D_RX("Statistics notification received (%d vs %d).\n", + (int)sizeof(struct il3945_notif_stats), + le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK); +#ifdef CONFIG_IWLEGACY_DEBUGFS + il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw); +#endif + + memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats)); +} + +void +il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + __le32 *flag = (__le32 *) &pkt->u.raw; + + if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) { +#ifdef CONFIG_IWLEGACY_DEBUGFS + memset(&il->_3945.accum_stats, 0, + sizeof(struct il3945_notif_stats)); + memset(&il->_3945.delta_stats, 0, + sizeof(struct il3945_notif_stats)); + memset(&il->_3945.max_delta, 0, + sizeof(struct il3945_notif_stats)); +#endif + D_RX("Statistics have been cleared\n"); + } + il3945_hdl_stats(il, rxb); +} + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ + +/* This is necessary only for a number of stats, see the caller. */ +static int +il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (il->iw_mode) { + case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr3, il->bssid); + case NL80211_IFTYPE_STATION: /* Header: Dest. 
| AP{BSSID} | Source */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr2, il->bssid); + default: + return 1; + } +} + +static void +il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, + struct ieee80211_rx_status *stats) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt); + struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); + struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); + u16 len = le16_to_cpu(rx_hdr->len); + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We received data from the HW, so stop the watchdog */ + if (unlikely + (len + IL39_RX_FRAME_SIZE > + PAGE_SIZE << il->hw_params.rx_page_order)) { + D_DROP("Corruption detected!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!il->is_open)) { + D_DROP("Dropping packet while interface is not open.\n"); + return; + } + + skb = dev_alloc_skb(128); + if (!skb) { + IL_ERR("dev_alloc_skb failed\n"); + return; + } + + if (!il3945_mod_params.sw_crypto) + il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb), + le32_to_cpu(rx_end->status), stats); + + skb_add_rx_frag(skb, 0, rxb->page, + (void *)rx_hdr->payload - (void *)pkt, len); + + il_update_stats(il, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(il->hw, skb); + il->alloc_rxb_page--; + rxb->page = NULL; +} + +#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) + +static void +il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt); + struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); + struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); + u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); + u16 rx_stats_noise_diff __maybe_unused = + le16_to_cpu(rx_stats->noise_diff); + u8 network_packet; + + rx_status.flag = 0; + rx_status.mactime = le64_to_cpu(rx_end->timestamp); + rx_status.band = + (rx_hdr-> + phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), + rx_status.band); + + rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate); + if (rx_status.band == IEEE80211_BAND_5GHZ) + rx_status.rate_idx -= IL_FIRST_OFDM_RATE; + + rx_status.antenna = + (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> + 4; + + /* set the preamble flag if appropriate */ + if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + if ((unlikely(rx_stats->phy_count > 20))) { + D_DROP("dsp size out of range [0,20]: %d/n", + rx_stats->phy_count); + return; + } + + if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); + return; + } + + /* Convert 3945's rssi indicator to dBm */ + rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET; + + D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal, + rx_stats_sig_avg, rx_stats_noise_diff); + + header = (struct ieee80211_hdr *)IL_RX_DATA(pkt); + + network_packet = il3945_is_network_packet(il, header); + + D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n", + network_packet ? 
'*' : ' ', le16_to_cpu(rx_hdr->channel), + rx_status.signal, rx_status.signal, rx_status.rate_idx); + + il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header); + + if (network_packet) { + il->_3945.last_beacon_time = + le32_to_cpu(rx_end->beacon_timestamp); + il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); + il->_3945.last_rx_rssi = rx_status.signal; + } + + il3945_pass_packet_to_mac80211(il, rxb, &rx_status); +} + +int +il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad) +{ + int count; + struct il_queue *q; + struct il3945_tfd *tfd, *tfd_tmp; + + q = &txq->q; + tfd_tmp = (struct il3945_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + + if (count >= NUM_TFD_CHUNKS || count < 0) { + IL_ERR("Error can not send more than %d chunks\n", + NUM_TFD_CHUNKS); + return -EINVAL; + } + + tfd->tbs[count].addr = cpu_to_le32(addr); + tfd->tbs[count].len = cpu_to_le32(len); + + count++; + + tfd->control_flags = + cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad)); + + return 0; +} + +/** + * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr] + * + * Does NOT advance any idxes + */ +void +il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) +{ + struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds; + int idx = txq->q.read_ptr; + struct il3945_tfd *tfd = &tfd_tmp[idx]; + struct pci_dev *dev = il->pci_dev; + int i; + int counter; + + /* sanity check */ + counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + if (counter > NUM_TFD_CHUNKS) { + IL_ERR("Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (counter) + pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), + dma_unmap_len(&txq->meta[idx], len), + PCI_DMA_TODEVICE); + + /* unmap chunks if any */ + + for (i = 1; i < counter; i++) + pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), + le32_to_cpu(tfd->tbs[i].len), + PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +/** + * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: + * +*/ +void +il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, int sta_id) +{ + u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value; + u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1); + u16 rate_mask; + int rate; + const u8 rts_retry_limit = 7; + u8 data_retry_limit; + __le32 tx_flags; + __le16 fc = hdr->frame_control; + struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; + + rate = il3945_rates[rate_idx].plcp; + tx_flags = tx_cmd->tx_flags; + + /* We need to figure out how to get the sta->supp_rates while + * in this running context */ + rate_mask = RATES_MASK_3945; + + /* Set retry limit on DATA packets and Probe Responses */ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IL_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + /* Set retry limit on RTS packets */ + tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit); + + tx_cmd->rate = rate; + tx_cmd->tx_flags = 
tx_flags; + + /* OFDM */ + tx_cmd->supp_rates[0] = + ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF; + + /* CCK */ + tx_cmd->supp_rates[1] = (rate_mask & 0xF); + + D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " + "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate, + le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1], + tx_cmd->supp_rates[0]); +} + +static u8 +il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate) +{ + unsigned long flags_spin; + struct il_station_entry *station; + + if (sta_id == IL_INVALID_STATION) + return IL_INVALID_STATION; + + spin_lock_irqsave(&il->sta_lock, flags_spin); + station = &il->stations[sta_id]; + + station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; + station->sta.rate_n_flags = cpu_to_le16(tx_rate); + station->sta.mode = STA_CONTROL_MODIFY_MSK; + il_send_add_sta(il, &station->sta, CMD_ASYNC); + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + + D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate); + return sta_id; +} + +static void +il3945_set_pwr_vmain(struct il_priv *il) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do + + if (pci_pme_capable(il->pci_dev, PCI_D3cold)) { + il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + _il_poll_bit(il, CSR_GPIO_IN, + CSR_GPIO_IN_VAL_VAUX_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); + } + */ + + il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); +} + +static int +il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq) +{ + il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); + il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); + il_wr(il, FH39_RCSR_WPTR(0), 0); + il_wr(il, FH39_RCSR_CONFIG(0), + FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | + FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG + << + FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) + | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 << + FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) + | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); + + /* fake read to flush all prev I/O */ + il_rd(il, FH39_RSSR_CTRL); + + return 0; +} + +static int +il3945_tx_reset(struct il_priv *il) +{ + + /* bypass mode */ + il_wr_prph(il, ALM_SCD_MODE_REG, 0x2); + + /* RA 0 is active */ + il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01); + + /* all 6 fifo are active */ + il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f); + + il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000); + il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002); + il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004); + il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005); + + il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys); + + il_wr(il, FH39_TSSR_MSG_CONFIG, + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); + + return 0; +} + +/** + * il3945_txq_ctx_reset - Reset TX queue context + * + * Destroys all DMA structures and initialize them again + */ +static int +il3945_txq_ctx_reset(struct il_priv *il) +{ + int rc; + int txq_id, slots_num; + + 
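+	/* Tear down any previous context, then rebuild it: allocate the
+	 * queue array, put the 3945 scheduler into bypass mode and enable
+	 * all six FIFOs via il3945_tx_reset(), and finally init every TX
+	 * queue -- TFD_CMD_SLOTS for the command queue, TFD_TX_CMD_SLOTS
+	 * for the data queues. */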
il3945_hw_txq_ctx_free(il); + + /* allocate tx queue structure */ + rc = il_alloc_txq_mem(il); + if (rc) + return rc; + + /* Tx CMD queue */ + rc = il3945_tx_reset(il); + if (rc) + goto error; + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { + slots_num = + (txq_id == + IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id); + if (rc) { + IL_ERR("Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + +error: + il3945_hw_txq_ctx_free(il); + return rc; +} + +/* + * Start up 3945's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via il_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int +il3945_apm_init(struct il_priv *il) +{ + int ret = il_apm_init(il); + + /* Clear APMG (NIC's internal power management) interrupts */ + il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0); + il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); + + /* Reset radio chip */ + il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); + + return ret; +} + +static void +il3945_nic_config(struct il_priv *il) +{ + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + unsigned long flags; + u8 rev_id = il->pci_dev->revision; + + spin_lock_irqsave(&il->lock, flags); + + /* Determine HW type */ + D_INFO("HW Revision ID = 0x%X\n", rev_id); + + if (rev_id & PCI_CFG_REV_ID_BIT_RTP) + D_INFO("RTP type\n"); + else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { + D_INFO("3945 RADIO-MB type\n"); + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); + } else { + D_INFO("3945 RADIO-MM type\n"); + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); + } + + if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { + D_INFO("SKU OP mode is mrc\n"); + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); + } else + D_INFO("SKU OP mode is basic\n"); + + if ((eeprom->board_revision & 0xF0) == 0xD0) { + D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision); + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } else { + D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision); + il_clear_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } + + if (eeprom->almgor_m_version <= 1) { + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); + D_INFO("Card M type A version is 0x%X\n", + eeprom->almgor_m_version); + } else { + D_INFO("Card M type B version is 0x%X\n", + eeprom->almgor_m_version); + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); + } + spin_unlock_irqrestore(&il->lock, flags); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + D_RF_KILL("SW RF KILL supported in EEPROM.\n"); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + D_RF_KILL("HW RF KILL supported in EEPROM.\n"); +} + +int +il3945_hw_nic_init(struct il_priv *il) +{ + int rc; + unsigned long flags; + struct il_rx_queue *rxq = &il->rxq; + + spin_lock_irqsave(&il->lock, flags); + il->cfg->ops->lib->apm_ops.init(il); + spin_unlock_irqrestore(&il->lock, flags); + + il3945_set_pwr_vmain(il); + + il->cfg->ops->lib->apm_ops.config(il); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = il_rx_queue_alloc(il); + if (rc) { + 
IL_ERR("Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + il3945_rx_queue_reset(il, rxq); + + il3945_rx_replenish(il); + + il3945_rx_init(il, rxq); + + /* Look at using this instead: + rxq->need_update = 1; + il_rx_queue_update_write_ptr(il, rxq); + */ + + il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7); + + rc = il3945_txq_ctx_reset(il); + if (rc) + return rc; + + set_bit(S_INIT, &il->status); + + return 0; +} + +/** + * il3945_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void +il3945_hw_txq_ctx_free(struct il_priv *il) +{ + int txq_id; + + /* Tx queues */ + if (il->txq) + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) + if (txq_id == IL39_CMD_QUEUE_NUM) + il_cmd_queue_free(il); + else + il_tx_queue_free(il, txq_id); + + /* free tx queue structure */ + il_txq_mem(il); +} + +void +il3945_hw_txq_ctx_stop(struct il_priv *il) +{ + int txq_id; + + /* stop SCD */ + il_wr_prph(il, ALM_SCD_MODE_REG, 0); + il_wr_prph(il, ALM_SCD_TXFACT_REG, 0); + + /* reset TFD queues */ + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { + il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0); + il_poll_bit(il, FH39_TSSR_TX_STATUS, + FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), + 1000); + } + + il3945_hw_txq_ctx_free(il); +} + +/** + * il3945_hw_reg_adjust_power_by_temp + * return idx delta into power gain settings table +*/ +static int +il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) +{ + return (new_reading - old_reading) * (-11) / 100; +} + +/** + * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range + */ +static inline int +il3945_hw_reg_temp_out_of_range(int temperature) +{ + return (temperature < -260 || temperature > 25) ? 1 : 0; +} + +int +il3945_hw_get_temperature(struct il_priv *il) +{ + return _il_rd(il, CSR_UCODE_DRV_GP2); +} + +/** + * il3945_hw_reg_txpower_get_temperature + * get the current temperature by reading from NIC +*/ +static int +il3945_hw_reg_txpower_get_temperature(struct il_priv *il) +{ + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + int temperature; + + temperature = il3945_hw_get_temperature(il); + + /* driver's okay range is -260 to +25. + * human readable okay range is 0 to +285 */ + D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT); + + /* handle insane temp reading */ + if (il3945_hw_reg_temp_out_of_range(temperature)) { + IL_ERR("Error bad temperature value %d\n", temperature); + + /* if really really hot(?), + * substitute the 3rd band/group's temp measured at factory */ + if (il->last_temperature > 100) + temperature = eeprom->groups[2].temperature; + else /* else use most recent "sane" value from driver */ + temperature = il->last_temperature; + } + + return temperature; /* raw, not "human readable" */ +} + +/* Adjust Txpower only if temperature variance is greater than threshold. + * + * Both are lower than older versions' 9 degrees */ +#define IL_TEMPERATURE_LIMIT_TIMER 6 + +/** + * il3945_is_temp_calib_needed - determines if new calibration is needed + * + * records new temperature in tx_mgr->temperature. + * replaces tx_mgr->last_temperature *only* if calib needed + * (assumes caller will actually do the calibration!). 
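+ * The reading has to differ from the previous one by at least
+ * IL_TEMPERATURE_LIMIT_TIMER (6) units before a new calibration is
+ * reported as needed; the compensation itself then moves each power
+ * idx by (temperature - factory_temp) * -11 / 100 table steps, see
+ * il3945_hw_reg_adjust_power_by_temp().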
*/ +static int +il3945_is_temp_calib_needed(struct il_priv *il) +{ + int temp_diff; + + il->temperature = il3945_hw_reg_txpower_get_temperature(il); + temp_diff = il->temperature - il->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + D_POWER("Getting cooler, delta %d,\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + D_POWER("Same temp,\n"); + else + D_POWER("Getting warmer, delta %d,\n", temp_diff); + + /* if we don't need calibration, *don't* update last_temperature */ + if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) { + D_POWER("Timed thermal calib not needed\n"); + return 0; + } + + D_POWER("Timed thermal calib needed\n"); + + /* assume that caller will actually do calib ... + * update the "last temperature" value */ + il->last_temperature = il->temperature; + return 1; +} + +#define IL_MAX_GAIN_ENTRIES 78 +#define IL_CCK_FROM_OFDM_POWER_DIFF -5 +#define IL_CCK_FROM_OFDM_IDX_DIFF (10) + +/* radio and DSP power table, each step is 1/2 dB. + * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ +static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = { + { + {251, 127}, /* 2.4 GHz, highest power */ + {251, 127}, + {251, 127}, + {251, 127}, + {251, 125}, + {251, 110}, + {251, 105}, + {251, 98}, + {187, 125}, + {187, 115}, + {187, 108}, + {187, 99}, + {243, 119}, + {243, 111}, + {243, 105}, + {243, 97}, + {243, 92}, + {211, 106}, + {211, 100}, + {179, 120}, + {179, 113}, + {179, 107}, + {147, 125}, + {147, 119}, + {147, 112}, + {147, 106}, + {147, 101}, + {147, 97}, + {147, 91}, + {115, 107}, + {235, 121}, + {235, 115}, + {235, 109}, + {203, 127}, + {203, 121}, + {203, 115}, + {203, 108}, + {203, 102}, + {203, 96}, + {203, 92}, + {171, 110}, + {171, 104}, + {171, 98}, + {139, 116}, + {227, 125}, + {227, 119}, + {227, 113}, + {227, 107}, + {227, 101}, + {227, 96}, + {195, 113}, + {195, 106}, + {195, 102}, + {195, 95}, + {163, 113}, + {163, 106}, + {163, 102}, + {163, 95}, + {131, 113}, + {131, 106}, + {131, 102}, + {131, 95}, + {99, 113}, + {99, 106}, + {99, 102}, + {99, 95}, + {67, 113}, + {67, 106}, + {67, 102}, + {67, 95}, + {35, 113}, + {35, 106}, + {35, 102}, + {35, 95}, + {3, 113}, + {3, 106}, + {3, 102}, + {3, 95} /* 2.4 GHz, lowest power */ + }, + { + {251, 127}, /* 5.x GHz, highest power */ + {251, 120}, + {251, 114}, + {219, 119}, + {219, 101}, + {187, 113}, + {187, 102}, + {155, 114}, + {155, 103}, + {123, 117}, + {123, 107}, + {123, 99}, + {123, 92}, + {91, 108}, + {59, 125}, + {59, 118}, + {59, 109}, + {59, 102}, + {59, 96}, + {59, 90}, + {27, 104}, + {27, 98}, + {27, 92}, + {115, 118}, + {115, 111}, + {115, 104}, + {83, 126}, + {83, 121}, + {83, 113}, + {83, 105}, + {83, 99}, + {51, 118}, + {51, 111}, + {51, 104}, + {51, 98}, + {19, 116}, + {19, 109}, + {19, 102}, + {19, 98}, + {19, 93}, + {171, 113}, + {171, 107}, + {171, 99}, + {139, 120}, + {139, 113}, + {139, 107}, + {139, 99}, + {107, 120}, + {107, 113}, + {107, 107}, + {107, 99}, + {75, 120}, + {75, 113}, + {75, 107}, + {75, 99}, + {43, 120}, + {43, 113}, + {43, 107}, + {43, 99}, + {11, 120}, + {11, 113}, + {11, 107}, + {11, 99}, + {131, 107}, + {131, 99}, + {99, 120}, + {99, 113}, + {99, 107}, + {99, 99}, + {67, 120}, + {67, 113}, + {67, 107}, + {67, 99}, + {35, 120}, + {35, 113}, + {35, 107}, + {35, 99}, + {3, 120} /* 5.x GHz, lowest power */ + } +}; + +static inline u8 +il3945_hw_reg_fix_power_idx(int idx) +{ + if (idx < 0) + return 0; + if (idx >= IL_MAX_GAIN_ENTRIES) + return IL_MAX_GAIN_ENTRIES - 1; + return (u8) idx; +} + +/* Kick off 
thermal recalibration check every 60 seconds */ +#define REG_RECALIB_PERIOD (60) + +/** + * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests + * + * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) + * or 6 Mbit (OFDM) rates. + */ +static void +il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx, + const s8 *clip_pwrs, + struct il_channel_info *ch_info, int band_idx) +{ + struct il3945_scan_power_info *scan_power_info; + s8 power; + u8 power_idx; + + scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx]; + + /* use this channel group's 6Mbit clipping/saturation pwr, + * but cap at regulatory scan power restriction (set during init + * based on eeprom channel data) for this channel. */ + power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]); + + power = min(power, il->tx_power_user_lmt); + scan_power_info->requested_power = power; + + /* find difference between new scan *power* and current "normal" + * Tx *power* for 6Mb. Use this difference (x2) to adjust the + * current "normal" temperature-compensated Tx power *idx* for + * this rate (1Mb or 6Mb) to yield new temp-compensated scan power + * *idx*. */ + power_idx = + ch_info->power_info[rate_idx].power_table_idx - (power - + ch_info-> + power_info + [RATE_6M_IDX_TBL]. + requested_power) * + 2; + + /* store reference idx that we use when adjusting *all* scan + * powers. So we can accommodate user (all channel) or spectrum + * management (single channel) power changes "between" temperature + * feedback compensation procedures. + * don't force fit this reference idx into gain table; it may be a + * negative number. This will help avoid errors when we're at + * the lower bounds (highest gains, for warmest temperatures) + * of the table. */ + + /* don't exceed table bounds for "real" setting */ + power_idx = il3945_hw_reg_fix_power_idx(power_idx); + + scan_power_info->power_table_idx = power_idx; + scan_power_info->tpc.tx_gain = + power_gain_table[band_idx][power_idx].tx_gain; + scan_power_info->tpc.dsp_atten = + power_gain_table[band_idx][power_idx].dsp_atten; +} + +/** + * il3945_send_tx_power - fill in Tx Power command with gain settings + * + * Configures power settings for all rates for the current channel, + * using values from channel info struct, and send to NIC + */ +static int +il3945_send_tx_power(struct il_priv *il) +{ + int rate_idx, i; + const struct il_channel_info *ch_info = NULL; + struct il3945_txpowertable_cmd txpower = { + .channel = il->ctx.active.channel, + }; + u16 chan; + + if (WARN_ONCE + (test_bit(S_SCAN_HW, &il->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + chan = le16_to_cpu(il->ctx.active.channel); + + txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 
0 : 1; + ch_info = il_get_channel_info(il, il->band, chan); + if (!ch_info) { + IL_ERR("Failed to get channel info for channel %d [%d]\n", chan, + il->band); + return -EINVAL; + } + + if (!il_is_channel_valid(ch_info)) { + D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n"); + return 0; + } + + /* fill cmd with power settings for all rates for current channel */ + /* Fill OFDM rate */ + for (rate_idx = IL_FIRST_OFDM_RATE, i = 0; + rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) { + + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = il3945_rates[rate_idx].plcp; + + D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, txpower.power[i].rate); + } + /* Fill CCK rates */ + for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE; + rate_idx++, i++) { + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = il3945_rates[rate_idx].plcp; + + D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, txpower.power[i].rate); + } + + return il_send_cmd_pdu(il, C_TX_PWR_TBL, + sizeof(struct il3945_txpowertable_cmd), + &txpower); + +} + +/** + * il3945_hw_reg_set_new_power - Configures power tables at new levels + * @ch_info: Channel to update. Uses power_info.requested_power. + * + * Replace requested_power and base_power_idx ch_info fields for + * one channel. + * + * Called if user or spectrum management changes power preferences. + * Takes into account h/w and modulation limitations (clip power). + * + * This does *not* send anything to NIC, just sets up ch_info for one channel. + * + * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to + * properly fill out the scan powers, and actual h/w gain settings, + * and send changes to NIC + */ +static int +il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info) +{ + struct il3945_channel_power_info *power_info; + int power_changed = 0; + int i; + const s8 *clip_pwrs; + int power; + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers; + + /* Get this channel's rate-to-current-power settings table */ + power_info = ch_info->power_info; + + /* update OFDM Txpower settings */ + for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) { + int delta_idx; + + /* limit new power to be no more than h/w capability */ + power = min(ch_info->curr_txpow, clip_pwrs[i]); + if (power == power_info->requested_power) + continue; + + /* find difference between old and new requested powers, + * update base (non-temp-compensated) power idx */ + delta_idx = (power - power_info->requested_power) * 2; + power_info->base_power_idx -= delta_idx; + + /* save new requested power value */ + power_info->requested_power = power; + + power_changed = 1; + } + + /* update CCK Txpower settings, based on OFDM 12M setting ... + * ... all CCK power settings for a given channel are the *same*. */ + if (power_changed) { + power = + ch_info->power_info[RATE_12M_IDX_TBL].requested_power + + IL_CCK_FROM_OFDM_POWER_DIFF; + + /* do all CCK rates' il3945_channel_power_info structures */ + for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) { + power_info->requested_power = power; + power_info->base_power_idx = + ch_info->power_info[RATE_12M_IDX_TBL]. 
+ base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF; + ++power_info; + } + } + + return 0; +} + +/** + * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel + * + * NOTE: Returned power limit may be less (but not more) than requested, + * based strictly on regulatory (eeprom and spectrum mgt) limitations + * (no consideration for h/w clipping limitations). + */ +static int +il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info) +{ + s8 max_power; + +#if 0 + /* if we're using TGd limits, use lower of TGd or EEPROM */ + if (ch_info->tgd_data.max_power != 0) + max_power = + min(ch_info->tgd_data.max_power, + ch_info->eeprom.max_power_avg); + + /* else just use EEPROM limits */ + else +#endif + max_power = ch_info->eeprom.max_power_avg; + + return min(max_power, ch_info->max_power_avg); +} + +/** + * il3945_hw_reg_comp_txpower_temp - Compensate for temperature + * + * Compensate txpower settings of *all* channels for temperature. + * This only accounts for the difference between current temperature + * and the factory calibration temperatures, and bases the new settings + * on the channel's base_power_idx. + * + * If RxOn is "associated", this sends the new Txpower to NIC! + */ +static int +il3945_hw_reg_comp_txpower_temp(struct il_priv *il) +{ + struct il_channel_info *ch_info = NULL; + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + int delta_idx; + const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ + u8 a_band; + u8 rate_idx; + u8 scan_tbl_idx; + u8 i; + int ref_temp; + int temperature = il->temperature; + + if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) { + /* do not perform tx power calibration */ + return 0; + } + /* set up new Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0; i < il->channel_count; i++) { + ch_info = &il->channel_info[i]; + a_band = il_is_channel_a_band(ch_info); + + /* Get this chnlgrp's factory calibration temperature */ + ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature; + + /* get power idx adjustment based on current and factory + * temps */ + delta_idx = + il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp); + + /* set tx power value for all rates, OFDM and CCK */ + for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) { + int power_idx = + ch_info->power_info[rate_idx].base_power_idx; + + /* temperature compensate */ + power_idx += delta_idx; + + /* stay within table range */ + power_idx = il3945_hw_reg_fix_power_idx(power_idx); + ch_info->power_info[rate_idx].power_table_idx = + (u8) power_idx; + ch_info->power_info[rate_idx].tpc = + power_gain_table[a_band][power_idx]; + } + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = + il->_3945.clip_groups[ch_info->group_idx].clip_powers; + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES; + scan_tbl_idx++) { + s32 actual_idx = + (scan_tbl_idx == + 0) ? 
RATE_1M_IDX_TBL : RATE_6M_IDX_TBL; + il3945_hw_reg_set_scan_power(il, scan_tbl_idx, + actual_idx, clip_pwrs, + ch_info, a_band); + } + } + + /* send Txpower command for current channel to ucode */ + return il->cfg->ops->lib->send_tx_power(il); +} + +int +il3945_hw_reg_set_txpower(struct il_priv *il, s8 power) +{ + struct il_channel_info *ch_info; + s8 max_power; + u8 a_band; + u8 i; + + if (il->tx_power_user_lmt == power) { + D_POWER("Requested Tx power same as current " "limit: %ddBm.\n", + power); + return 0; + } + + D_POWER("Setting upper limit clamp to %ddBm.\n", power); + il->tx_power_user_lmt = power; + + /* set up new Tx powers for each and every channel, 2.4 and 5.x */ + + for (i = 0; i < il->channel_count; i++) { + ch_info = &il->channel_info[i]; + a_band = il_is_channel_a_band(ch_info); + + /* find minimum power of all user and regulatory constraints + * (does not consider h/w clipping limitations) */ + max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info); + max_power = min(power, max_power); + if (max_power != ch_info->curr_txpow) { + ch_info->curr_txpow = max_power; + + /* this considers the h/w clipping limitations */ + il3945_hw_reg_set_new_power(il, ch_info); + } + } + + /* update txpower settings for all channels, + * send to NIC if associated. */ + il3945_is_temp_calib_needed(il); + il3945_hw_reg_comp_txpower_temp(il); + + return 0; +} + +static int +il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx) +{ + int rc = 0; + struct il_rx_pkt *pkt; + struct il3945_rxon_assoc_cmd rxon_assoc; + struct il_host_cmd cmd = { + .id = C_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct il_rxon_cmd *rxon1 = &ctx->staging; + const struct il_rxon_cmd *rxon2 = &ctx->active; + + if (rxon1->flags == rxon2->flags && + rxon1->filter_flags == rxon2->filter_flags && + rxon1->cck_basic_rates == rxon2->cck_basic_rates && + rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) { + D_INFO("Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved = 0; + + rc = il_send_cmd_sync(il, &cmd); + if (rc) + return rc; + + pkt = (struct il_rx_pkt *)cmd.reply_page; + if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { + IL_ERR("Bad return from C_RXON_ASSOC command\n"); + rc = -EIO; + } + + il_free_pages(il, cmd.reply_page); + + return rc; +} + +/** + * il3945_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. 
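+ *
+ * Rough outline of the flow below (a sketch, not a normative spec): when
+ * il_full_rxon_required() is false, only the lightweight C_RXON_ASSOC is
+ * sent and the staging config is copied into active; otherwise, if we are
+ * currently associated and the new config also has the ASSOC bit set, the
+ * ASSOC bit is first cleared on the active config and pushed, then the
+ * full C_RXON is sent with the staging config, stations are restored,
+ * tx power is re-sent, and the HW rate fallback table is rebuilt.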
+ */ +int +il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active; + struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging; + int rc = 0; + bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return -EINVAL; + + if (!il_is_alive(il)) + return -1; + + /* always get timestamp with Rx frame */ + staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; + + /* select antenna */ + staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); + staging_rxon->flags |= il3945_get_antenna_flags(il); + + rc = il_check_rxon_cmd(il, ctx); + if (rc) { + IL_ERR("Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * il3945_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!il_full_rxon_required(il, &il->ctx)) { + rc = il_send_rxon_assoc(il, &il->ctx); + if (rc) { + IL_ERR("Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + /* + * We do not commit tx power settings while channel changing, + * do it now if tx power changed. + */ + il_set_tx_power(il, il->tx_power_next, false); + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (il_is_associated(il) && new_assoc) { + D_INFO("Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. + */ + active_rxon->reserved4 = 0; + active_rxon->reserved5 = 0; + rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd), + &il->ctx.active); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IL_ERR("Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + il_clear_ucode_stations(il, &il->ctx); + il_restore_stations(il, &il->ctx); + } + + D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"), + le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr); + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. 
+ */ + staging_rxon->reserved4 = 0; + staging_rxon->reserved5 = 0; + + il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto); + + /* Apply the new configuration */ + rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd), + staging_rxon); + if (rc) { + IL_ERR("Error setting new configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + + if (!new_assoc) { + il_clear_ucode_stations(il, &il->ctx); + il_restore_stations(il, &il->ctx); + } + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = il_set_tx_power(il, il->tx_power_next, true); + if (rc) { + IL_ERR("Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Init the hardware's rate fallback order based on the band */ + rc = il3945_init_hw_rate_table(il); + if (rc) { + IL_ERR("Error setting HW rate table: %02X\n", rc); + return -EIO; + } + + return 0; +} + +/** + * il3945_reg_txpower_periodic - called when time to check our temperature. + * + * -- reset periodic timer + * -- see if temp has changed enough to warrant re-calibration ... if so: + * -- correct coeffs for temp (can reset temp timer) + * -- save this temp as "last", + * -- send new set of gain settings to NIC + * NOTE: This should continue working, even when we're not associated, + * so we can keep our internal table of scan powers current. */ +void +il3945_reg_txpower_periodic(struct il_priv *il) +{ + /* This will kick in the "brute force" + * il3945_hw_reg_comp_txpower_temp() below */ + if (!il3945_is_temp_calib_needed(il)) + goto reschedule; + + /* Set up a new set of temp-adjusted TxPowers, send to NIC. + * This is based *only* on current temperature, + * ignoring any previous power measurements */ + il3945_hw_reg_comp_txpower_temp(il); + +reschedule: + queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic, + REG_RECALIB_PERIOD * HZ); +} + +static void +il3945_bg_reg_txpower_periodic(struct work_struct *work) +{ + struct il_priv *il = container_of(work, struct il_priv, + _3945.thermal_periodic.work); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + mutex_lock(&il->mutex); + il3945_reg_txpower_periodic(il); + mutex_unlock(&il->mutex); +} + +/** + * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel. + * + * This function is used when initializing channel-info structs. + * + * NOTE: These channel groups do *NOT* match the bands above! + * These channel groups are based on factory-tested channels; + * on A-band, EEPROM's "group frequency" entries represent the top + * channel in each group 1-4. Group 5 All B/G channels are in group 0. + */ +static u16 +il3945_hw_reg_get_ch_grp_idx(struct il_priv *il, + const struct il_channel_info *ch_info) +{ + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0]; + u8 group; + u16 group_idx = 0; /* based on factory calib frequencies */ + u8 grp_channel; + + /* Find the group idx for the channel ... don't use idx 1(?) 
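+ *
+ * Example with purely hypothetical factory-cal "group frequency" entries
+ * of 48, 64, 128 and 165 for groups 1-4: 5 GHz channel 56 maps to group 2
+ * (the first group whose top channel is >= 56), channel 171 lies above
+ * group 4's top channel and still maps to group 4, and any 2.4 GHz
+ * channel maps to group 0.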
*/ + if (il_is_channel_a_band(ch_info)) { + for (group = 1; group < 5; group++) { + grp_channel = ch_grp[group].group_channel; + if (ch_info->channel <= grp_channel) { + group_idx = group; + break; + } + } + /* group 4 has a few channels *above* its factory cal freq */ + if (group == 5) + group_idx = 4; + } else + group_idx = 0; /* 2.4 GHz, group 0 */ + + D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx); + return group_idx; +} + +/** + * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx + * + * Interpolate to get nominal (i.e. at factory calibration temperature) idx + * into radio/DSP gain settings table for requested power. + */ +static int +il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power, + s32 setting_idx, s32 *new_idx) +{ + const struct il3945_eeprom_txpower_group *chnl_grp = NULL; + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + s32 idx0, idx1; + s32 power = 2 * requested_power; + s32 i; + const struct il3945_eeprom_txpower_sample *samples; + s32 gains0, gains1; + s32 res; + s32 denominator; + + chnl_grp = &eeprom->groups[setting_idx]; + samples = chnl_grp->samples; + for (i = 0; i < 5; i++) { + if (power == samples[i].power) { + *new_idx = samples[i].gain_idx; + return 0; + } + } + + if (power > samples[1].power) { + idx0 = 0; + idx1 = 1; + } else if (power > samples[2].power) { + idx0 = 1; + idx1 = 2; + } else if (power > samples[3].power) { + idx0 = 2; + idx1 = 3; + } else { + idx0 = 3; + idx1 = 4; + } + + denominator = (s32) samples[idx1].power - (s32) samples[idx0].power; + if (denominator == 0) + return -EINVAL; + gains0 = (s32) samples[idx0].gain_idx * (1 << 19); + gains1 = (s32) samples[idx1].gain_idx * (1 << 19); + res = + gains0 + (gains1 - gains0) * ((s32) power - + (s32) samples[idx0].power) / + denominator + (1 << 18); + *new_idx = res >> 19; + return 0; +} + +static void +il3945_hw_reg_init_channel_groups(struct il_priv *il) +{ + u32 i; + s32 rate_idx; + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + const struct il3945_eeprom_txpower_group *group; + + D_POWER("Initializing factory calib info from EEPROM\n"); + + for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) { + s8 *clip_pwrs; /* table of power levels for each rate */ + s8 satur_pwr; /* saturation power for each chnl group */ + group = &eeprom->groups[i]; + + /* sanity check on factory saturation power value */ + if (group->saturation_power < 40) { + IL_WARN("Error: saturation power is %d, " + "less than minimum expected 40\n", + group->saturation_power); + return; + } + + /* + * Derive requested power levels for each rate, based on + * hardware capabilities (saturation power for band). + * Basic value is 3dB down from saturation, with further + * power reductions for highest 3 data rates. These + * backoffs provide headroom for high rate modulation + * power peaks, without too much distortion (clipping). 
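+ *
+ * For example (values taken from the switch statement below): in the
+ * 2.4 GHz group (i == 0) the 36/48/54 Mbps clip powers become satur_pwr,
+ * satur_pwr - 7 and satur_pwr - 9; in the 5 GHz groups they become
+ * satur_pwr - 5, satur_pwr - 10 and satur_pwr - 12, with all lower rates
+ * left at satur_pwr.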
+ */ + /* we'll fill in this array with h/w max power levels */ + clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers; + + /* divide factory saturation power by 2 to find -3dB level */ + satur_pwr = (s8) (group->saturation_power >> 1); + + /* fill in channel group's nominal powers for each rate */ + for (rate_idx = 0; rate_idx < RATE_COUNT_3945; + rate_idx++, clip_pwrs++) { + switch (rate_idx) { + case RATE_36M_IDX_TBL: + if (i == 0) /* B/G */ + *clip_pwrs = satur_pwr; + else /* A */ + *clip_pwrs = satur_pwr - 5; + break; + case RATE_48M_IDX_TBL: + if (i == 0) + *clip_pwrs = satur_pwr - 7; + else + *clip_pwrs = satur_pwr - 10; + break; + case RATE_54M_IDX_TBL: + if (i == 0) + *clip_pwrs = satur_pwr - 9; + else + *clip_pwrs = satur_pwr - 12; + break; + default: + *clip_pwrs = satur_pwr; + break; + } + } + } +} + +/** + * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM + * + * Second pass (during init) to set up il->channel_info + * + * Set up Tx-power settings in our channel info database for each VALID + * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values + * and current temperature. + * + * Since this is based on current temperature (at init time), these values may + * not be valid for very long, but it gives us a starting/default point, + * and allows us to active (i.e. using Tx) scan. + * + * This does *not* write values to NIC, just sets up our internal table. + */ +int +il3945_txpower_set_from_eeprom(struct il_priv *il) +{ + struct il_channel_info *ch_info = NULL; + struct il3945_channel_power_info *pwr_info; + struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; + int delta_idx; + u8 rate_idx; + u8 scan_tbl_idx; + const s8 *clip_pwrs; /* array of power levels for each rate */ + u8 gain, dsp_atten; + s8 power; + u8 pwr_idx, base_pwr_idx, a_band; + u8 i; + int temperature; + + /* save temperature reference, + * so we can determine next time to calibrate */ + temperature = il3945_hw_reg_txpower_get_temperature(il); + il->last_temperature = temperature; + + il3945_hw_reg_init_channel_groups(il); + + /* initialize Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0, ch_info = il->channel_info; i < il->channel_count; + i++, ch_info++) { + a_band = il_is_channel_a_band(ch_info); + if (!il_is_channel_valid(ch_info)) + continue; + + /* find this channel's channel group (*not* "band") idx */ + ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info); + + /* Get this chnlgrp's rate->max/clip-powers table */ + clip_pwrs = + il->_3945.clip_groups[ch_info->group_idx].clip_powers; + + /* calculate power idx *adjustment* value according to + * diff between current temperature and factory temperature */ + delta_idx = + il3945_hw_reg_adjust_power_by_temp(temperature, + eeprom->groups[ch_info-> + group_idx]. + temperature); + + D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel, + delta_idx, temperature + IL_TEMP_CONVERT); + + /* set tx power value for all OFDM rates */ + for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) { + s32 uninitialized_var(power_idx); + int rc; + + /* use channel group's clip-power table, + * but don't exceed channel's max power */ + s8 pwr = min(ch_info->max_power_avg, + clip_pwrs[rate_idx]); + + pwr_info = &ch_info->power_info[rate_idx]; + + /* get base (i.e. 
at factory-measured temperature) + * power table idx for this rate's power */ + rc = il3945_hw_reg_get_matched_power_idx(il, pwr, + ch_info-> + group_idx, + &power_idx); + if (rc) { + IL_ERR("Invalid power idx\n"); + return rc; + } + pwr_info->base_power_idx = (u8) power_idx; + + /* temperature compensate */ + power_idx += delta_idx; + + /* stay within range of gain table */ + power_idx = il3945_hw_reg_fix_power_idx(power_idx); + + /* fill 1 OFDM rate's il3945_channel_power_info struct */ + pwr_info->requested_power = pwr; + pwr_info->power_table_idx = (u8) power_idx; + pwr_info->tpc.tx_gain = + power_gain_table[a_band][power_idx].tx_gain; + pwr_info->tpc.dsp_atten = + power_gain_table[a_band][power_idx].dsp_atten; + } + + /* set tx power for CCK rates, based on OFDM 12 Mbit settings */ + pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL]; + power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF; + pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF; + base_pwr_idx = + pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF; + + /* stay within table range */ + pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx); + gain = power_gain_table[a_band][pwr_idx].tx_gain; + dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten; + + /* fill each CCK rate's il3945_channel_power_info structure + * NOTE: All CCK-rate Txpwrs are the same for a given chnl! + * NOTE: CCK rates start at end of OFDM rates! */ + for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) { + pwr_info = + &ch_info->power_info[rate_idx + IL_OFDM_RATES]; + pwr_info->requested_power = power; + pwr_info->power_table_idx = pwr_idx; + pwr_info->base_power_idx = base_pwr_idx; + pwr_info->tpc.tx_gain = gain; + pwr_info->tpc.dsp_atten = dsp_atten; + } + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES; + scan_tbl_idx++) { + s32 actual_idx = + (scan_tbl_idx == + 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL; + il3945_hw_reg_set_scan_power(il, scan_tbl_idx, + actual_idx, clip_pwrs, + ch_info, a_band); + } + } + + return 0; +} + +int +il3945_hw_rxq_stop(struct il_priv *il) +{ + int rc; + + il_wr(il, FH39_RCSR_CONFIG(0), 0); + rc = il_poll_bit(il, FH39_RSSR_STATUS, + FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + if (rc < 0) + IL_ERR("Can't stop Rx DMA.\n"); + + return 0; +} + +int +il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq) +{ + int txq_id = txq->q.id; + + struct il3945_shared *shared_data = il->_3945.shared_virt; + + shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr); + + il_wr(il, FH39_CBCC_CTRL(txq_id), 0); + il_wr(il, FH39_CBCC_BASE(txq_id), 0); + + il_wr(il, FH39_TCSR_CONFIG(txq_id), + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | + FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); + + /* fake read to flush all prev. 
writes */ + _il_rd(il, FH39_TSSR_CBB_BASE); + + return 0; +} + +/* + * HCMD utils + */ +static u16 +il3945_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case C_RXON: + return sizeof(struct il3945_rxon_cmd); + case C_POWER_TBL: + return sizeof(struct il3945_powertable_cmd); + default: + return len; + } +} + +static u16 +il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data) +{ + struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cpu_to_le16(0); + addsta->rate_n_flags = cmd->rate_n_flags; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + + return (u16) sizeof(struct il3945_addsta_cmd); +} + +static int +il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r) +{ + struct il_rxon_context *ctx = &il->ctx; + int ret; + u8 sta_id; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IL_INVALID_STATION; + + ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IL_ERR("Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].used |= IL_STA_LOCAL; + spin_unlock_irqrestore(&il->sta_lock, flags); + + return 0; +} + +static int +il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, + bool add) +{ + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + int ret; + + if (add) { + ret = + il3945_add_bssid_station(il, vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + if (ret) + return ret; + + il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id, + (il->band == + IEEE80211_BAND_5GHZ) ? 
RATE_6M_PLCP : + RATE_1M_PLCP); + il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id); + + return 0; + } + + return il_remove_station(il, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +/** + * il3945_init_hw_rate_table - Initialize the hardware rate fallback table + */ +int +il3945_init_hw_rate_table(struct il_priv *il) +{ + int rc, i, idx, prev_idx; + struct il3945_rate_scaling_cmd rate_cmd = { + .reserved = {0, 0, 0}, + }; + struct il3945_rate_scaling_info *table = rate_cmd.table; + + for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) { + idx = il3945_rates[i].table_rs_idx; + + table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp); + table[idx].try_cnt = il->retry_rate; + prev_idx = il3945_get_prev_ieee_rate(i); + table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx; + } + + switch (il->band) { + case IEEE80211_BAND_5GHZ: + D_RATE("Select A mode rate scale\n"); + /* If one of the following CCK rates is used, + * have it fall back to the 6M OFDM rate */ + for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) + table[i].next_rate_idx = + il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx; + + /* Don't fall back to CCK rates */ + table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL; + + /* Don't drop out of OFDM rates */ + table[RATE_6M_IDX_TBL].next_rate_idx = + il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx; + break; + + case IEEE80211_BAND_2GHZ: + D_RATE("Select B/G mode rate scale\n"); + /* If an OFDM rate is used, have it fall back to the + * 1M CCK rates */ + + if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) && + il_is_associated(il)) { + + idx = IL_FIRST_CCK_RATE; + for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++) + table[i].next_rate_idx = + il3945_rates[idx].table_rs_idx; + + idx = RATE_11M_IDX_TBL; + /* CCK shouldn't fall back to OFDM... 
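+ * (In this no-OFDM case every OFDM entry above was pointed at the first
+ * CCK rate, and 11 Mbps is made to fall back to 5.5 Mbps here.)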
*/ + table[idx].next_rate_idx = RATE_5M_IDX_TBL; + } + break; + + default: + WARN_ON(1); + break; + } + + /* Update the rate scaling for control frame Tx */ + rate_cmd.table_id = 0; + rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd); + if (rc) + return rc; + + /* Update the rate scaling for data frame Tx */ + rate_cmd.table_id = 1; + return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd); +} + +/* Called when initializing driver */ +int +il3945_hw_set_hw_params(struct il_priv *il) +{ + memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params)); + + il->_3945.shared_virt = + dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared), + &il->_3945.shared_phys, GFP_KERNEL); + if (!il->_3945.shared_virt) { + IL_ERR("failed to allocate pci memory\n"); + return -ENOMEM; + } + + /* Assign number of Usable TX queues */ + il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues; + + il->hw_params.tfd_size = sizeof(struct il3945_tfd); + il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K); + il->hw_params.max_rxq_size = RX_QUEUE_SIZE; + il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + il->hw_params.max_stations = IL3945_STATION_COUNT; + il->ctx.bcast_sta_id = IL3945_BROADCAST_ID; + + il->sta_key_max_num = STA_KEY_MAX_NUM; + + il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; + il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL; + il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS; + + return 0; +} + +unsigned int +il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame, + u8 rate) +{ + struct il3945_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = + il3945_fill_beacon_frame(il, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size); + + tx_beacon_cmd->tx.rate = rate; + tx_beacon_cmd->tx.tx_flags = + (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK); + + /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */ + tx_beacon_cmd->tx.supp_rates[0] = + (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; + + tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF); + + return sizeof(struct il3945_tx_beacon_cmd) + frame_size; +} + +void +il3945_hw_handler_setup(struct il_priv *il) +{ + il->handlers[C_TX] = il3945_hdl_tx; + il->handlers[N_3945_RX] = il3945_hdl_rx; +} + +void +il3945_hw_setup_deferred_work(struct il_priv *il) +{ + INIT_DELAYED_WORK(&il->_3945.thermal_periodic, + il3945_bg_reg_txpower_periodic); +} + +void +il3945_hw_cancel_deferred_work(struct il_priv *il) +{ + cancel_delayed_work(&il->_3945.thermal_periodic); +} + +/* check contents of special bootstrap uCode SRAM */ +static int +il3945_verify_bsm(struct il_priv *il) +{ + __le32 *image = il->ucode_boot.v_addr; + u32 len = il->ucode_boot.len; + u32 reg; + u32 val; + + D_INFO("Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = il_rd_prph(il, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = il_rd_prph(il, reg); + if (val != le32_to_cpu(*image)) { + IL_ERR("BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, reg - 
BSM_SRAM_LOWER_BOUND, + len, val, le32_to_cpu(*image)); + return -EIO; + } + } + + D_INFO("BSM bootstrap uCode image OK\n"); + + return 0; +} + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +/* + * Clear the OWNER_MSK, to establish driver (instead of uCode running on + * embedded controller) as EEPROM reader; each read is a series of pulses + * to/from the EEPROM chip, not a single event, so even reads could conflict + * if they weren't arbitrated by some ownership mechanism. Here, the driver + * simply claims ownership, which should be safe when this function is called + * (i.e. before loading uCode!). + */ +static int +il3945_eeprom_acquire_semaphore(struct il_priv *il) +{ + _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + return 0; +} + +static void +il3945_eeprom_release_semaphore(struct il_priv *il) +{ + return; +} + + /** + * il3945_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int +il3945_load_bsm(struct il_priv *il) +{ + __le32 *image = il->ucode_boot.v_addr; + u32 len = il->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + D_INFO("Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IL39_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 31:0 for 3945. + * NOTE: il3945_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
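+ *
+ * In short, the code below points the BSM_DRAM_* registers at the
+ * "initialize" image, copies and verifies the bootstrap program in BSM
+ * SRAM, kicks the copy into instruction SRAM via BSM_WR_CTRL_REG_BIT_START
+ * (polling roughly 1 ms for completion), and finally leaves
+ * BSM_WR_CTRL_REG_BIT_START_EN set so the power management unit can
+ * re-run the boot load after a power-save shutdown.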
*/ + pinst = il->ucode_init.p_addr; + pdata = il->ucode_init_data.p_addr; + inst_len = il->ucode_init.len; + data_len = il->ucode_init_data.len; + + il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); + il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); + il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _il_wr_prph(il, reg_offset, le32_to_cpu(*image)); + + rc = il3945_verify_bsm(il); + if (rc) + return rc; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0); + il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND); + il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = il_rd_prph(il, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + D_INFO("BSM write complete, poll %d iterations\n", i); + else { + IL_ERR("BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); + + return 0; +} + +static struct il_hcmd_ops il3945_hcmd = { + .rxon_assoc = il3945_send_rxon_assoc, + .commit_rxon = il3945_commit_rxon, +}; + +static struct il_lib_ops il3945_lib = { + .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = il3945_hw_txq_free_tfd, + .txq_init = il3945_hw_tx_queue_init, + .load_ucode = il3945_load_bsm, + .dump_nic_error_log = il3945_dump_nic_error_log, + .apm_ops = { + .init = il3945_apm_init, + .config = il3945_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .acquire_semaphore = il3945_eeprom_acquire_semaphore, + .release_semaphore = il3945_eeprom_release_semaphore, + }, + .send_tx_power = il3945_send_tx_power, + .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr, + +#ifdef CONFIG_IWLEGACY_DEBUGFS + .debugfs_ops = { + .rx_stats_read = il3945_ucode_rx_stats_read, + .tx_stats_read = il3945_ucode_tx_stats_read, + .general_stats_read = il3945_ucode_general_stats_read, + }, +#endif +}; + +static const struct il_legacy_ops il3945_legacy_ops = { + .post_associate = il3945_post_associate, + .config_ap = il3945_config_ap, + .manage_ibss_station = il3945_manage_ibss_station, +}; + +static struct il_hcmd_utils_ops il3945_hcmd_utils = { + .get_hcmd_size = il3945_get_hcmd_size, + .build_addsta_hcmd = il3945_build_addsta_hcmd, + .request_scan = il3945_request_scan, + .post_scan = il3945_post_scan, +}; + +static const struct il_ops il3945_ops = { + .lib = &il3945_lib, + .hcmd = &il3945_hcmd, + .utils = &il3945_hcmd_utils, + .led = &il3945_led_ops, + .legacy = &il3945_legacy_ops, + .ieee80211_ops = &il3945_hw_ops, +}; + +static struct il_base_params il3945_base_params = { + .eeprom_size = IL3945_EEPROM_IMG_SIZE, + 
.num_of_queues = IL39_NUM_QUEUES, + .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, + .set_l0s = false, + .use_bsm = true, + .led_compensation = 64, + .wd_timeout = IL_DEF_WD_TIMEOUT, +}; + +static struct il_cfg il3945_bg_cfg = { + .name = "3945BG", + .fw_name_pre = IL3945_FW_PRE, + .ucode_api_max = IL3945_UCODE_API_MAX, + .ucode_api_min = IL3945_UCODE_API_MIN, + .sku = IL_SKU_G, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &il3945_ops, + .mod_params = &il3945_mod_params, + .base_params = &il3945_base_params, + .led_mode = IL_LED_BLINK, +}; + +static struct il_cfg il3945_abg_cfg = { + .name = "3945ABG", + .fw_name_pre = IL3945_FW_PRE, + .ucode_api_max = IL3945_UCODE_API_MAX, + .ucode_api_min = IL3945_UCODE_API_MIN, + .sku = IL_SKU_A | IL_SKU_G, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &il3945_ops, + .mod_params = &il3945_mod_params, + .base_params = &il3945_base_params, + .led_mode = IL_LED_BLINK, +}; + +DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = { + {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)}, + {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)}, + {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)}, + {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)}, + {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)}, + {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids); diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h new file mode 100644 index 00000000000..9f42f79f877 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/3945.h @@ -0,0 +1,607 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __il_3945_h__ +#define __il_3945_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <net/ieee80211_radiotap.h> + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +extern const struct pci_device_id il3945_hw_card_ids[]; + +#include "common.h" + +/* Highest firmware API version supported */ +#define IL3945_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IL3945_UCODE_API_MIN 1 + +#define IL3945_FW_PRE "iwlwifi-3945-" +#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode" +#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api) + +/* Default noise level to report when noise measurement is not available. 
+ * This may be because we're: + * 1) Not associated (4965, no beacon stats being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* Module parameters accessible from iwl-*.c */ +extern struct il_mod_params il3945_mod_params; + +struct il3945_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct il3945_rs_sta { + spinlock_t lock; + struct il_priv *il; + s32 *expected_tpt; + unsigned long last_partial_flush; + unsigned long last_flush; + u32 flush_time; + u32 last_tx_packets; + u32 tx_packets; + u8 tgg; + u8 flush_pending; + u8 start_rate; + struct timer_list rate_scale_flush; + struct il3945_rate_scale_data win[RATE_COUNT_3945]; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_stats_table_file; +#endif + + /* used to be in sta_info */ + int last_txrate_idx; +}; + +/* + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct il3945_sta_priv { + struct il_station_priv_common common; + struct il3945_rs_sta rs_sta; +}; + +enum il3945_antenna { + IL_ANTENNA_DIVERSITY, + IL_ANTENNA_MAIN, + IL_ANTENNA_AUX +}; + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. 
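+ *
+ * e.g. with the DEFAULT_RTS_THRESHOLD of 2347 defined below, no frame can
+ * exceed the threshold (max MSDU size is 2304), so RTS/CTS is effectively
+ * disabled by default; a threshold of 0 would request RTS before every
+ * data/management frame.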
+ */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +#define IL_TX_FIFO_AC0 0 +#define IL_TX_FIFO_AC1 1 +#define IL_TX_FIFO_AC2 2 +#define IL_TX_FIFO_AC3 3 +#define IL_TX_FIFO_HCCA_1 5 +#define IL_TX_FIFO_HCCA_2 6 +#define IL_TX_FIFO_NONE 7 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct il3945_frame { + union { + struct ieee80211_hdr frame; + struct il3945_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +#define IL_SUPPORTED_RATES_IE_LEN 8 + +#define SCAN_INTERVAL 100 + +#define MAX_TID_COUNT 9 + +#define IL_INVALID_RATE 0xFF +#define IL_INVALID_VALUE -1 + +#define STA_PS_STATUS_WAKE 0 +#define STA_PS_STATUS_SLEEP 1 + +struct il3945_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\ + x->u.rx_frame.stats.payload + \ + x->u.rx_frame.stats.phy_count)) +#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\ + IL_RX_HDR(x)->payload + \ + le16_to_cpu(IL_RX_HDR(x)->len))) +#define IL_RX_STATS(x) (&x->u.rx_frame.stats) +#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload) + +/****************************************************************************** + * + * Functions implemented in iwl3945-base.c which are forward declared here + * for use by iwl-*.c + * + *****************************************************************************/ +extern int il3945_calc_db_from_ratio(int sig_ratio); +extern void il3945_rx_replenish(void *data); +extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq); +extern unsigned int il3945_fill_beacon_frame(struct il_priv *il, + struct ieee80211_hdr *hdr, + int left); +extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, + char **buf, bool display); +extern void il3945_dump_nic_error_log(struct il_priv *il); + +/****************************************************************************** + * + * Functions implemented in iwl-[34]*.c which are forward declared here + * for use by iwl3945-base.c + * + * NOTE: The implementation of these functions are hardware specific + * which is why they are in the hardware specific files (vs. 
iwl-base.c) + * + * Naming convention -- + * il3945_ <-- Its part of iwlwifi (should be changed to il3945_) + * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * il3945_bg_ <-- Called from work queue context + * il3945_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void il3945_hw_handler_setup(struct il_priv *il); +extern void il3945_hw_setup_deferred_work(struct il_priv *il); +extern void il3945_hw_cancel_deferred_work(struct il_priv *il); +extern int il3945_hw_rxq_stop(struct il_priv *il); +extern int il3945_hw_set_hw_params(struct il_priv *il); +extern int il3945_hw_nic_init(struct il_priv *il); +extern int il3945_hw_nic_stop_master(struct il_priv *il); +extern void il3945_hw_txq_ctx_free(struct il_priv *il); +extern void il3945_hw_txq_ctx_stop(struct il_priv *il); +extern int il3945_hw_nic_reset(struct il_priv *il); +extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, + struct il_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, + u8 pad); +extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq); +extern int il3945_hw_get_temperature(struct il_priv *il); +extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); +extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il, + struct il3945_frame *frame, + u8 rate); +void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, int sta_id); +extern int il3945_hw_reg_send_txpower(struct il_priv *il); +extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power); +extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb); +void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb); +extern void il3945_disable_events(struct il_priv *il); +extern int il4965_get_temperature(const struct il_priv *il); +extern void il3945_post_associate(struct il_priv *il); +extern void il3945_config_ap(struct il_priv *il); + +extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx); + +/** + * il3945_hw_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + * + * NOTE: This should not be hardware specific but the code has + * not yet been merged into a single common layer for managing the + * station tables. + */ +extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid); + +extern struct ieee80211_ops il3945_hw_ops; + +extern __le32 il3945_get_antenna_flags(const struct il_priv *il); +extern int il3945_init_hw_rate_table(struct il_priv *il); +extern void il3945_reg_txpower_periodic(struct il_priv *il); +extern int il3945_txpower_set_from_eeprom(struct il_priv *il); + +extern int il3945_rs_next_rate(struct il_priv *il, int rate); + +/* scanning */ +int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif); +void il3945_post_scan(struct il_priv *il); + +/* rates */ +extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945]; + +/* RSSI to dBm */ +#define IL39_RSSI_OFFSET 95 + +/* + * EEPROM related constants, enums, and structures. + */ +#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) + +/* + * Mapping of a Tx power level, at factory calibration temperature, + * to a radio/DSP gain table idx. + * One for each of 5 "sample" power levels in each band. 
+ * v_det is measured at the factory, using the 3945's built-in power amplifier + * (PA) output voltage detector. This same detector is used during Tx of + * long packets in normal operation to provide feedback as to proper output + * level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct il3945_eeprom_txpower_sample { + u8 gain_idx; /* idx into power (gain) setup table ... */ + s8 power; /* ... for this pwr level for this chnl group */ + u16 v_det; /* PA output voltage */ +} __packed; + +/* + * Mappings of Tx power levels -> nominal radio/DSP gain table idxes. + * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). + * Tx power setup code interpolates between the 5 "sample" power levels + * to determine the nominal setup for a requested power level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct il3945_eeprom_txpower_group { + struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */ + s32 a, b, c, d, e; /* coefficients for voltage->power + * formula (signed) */ + s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on + * frequency (signed) */ + s8 saturation_power; /* highest power possible by h/w in this + * band */ + u8 group_channel; /* "representative" channel # in this band */ + s16 temperature; /* h/w temperature at factory calib this band + * (signed) */ +} __packed; + +/* + * Temperature-based Tx-power compensation data, not band-specific. + * These coefficients are use to modify a/b/c/d/e coeffs based on + * difference between current temperature and factory calib temperature. + * Data copied from EEPROM. + */ +struct il3945_eeprom_temperature_corr { + u32 Ta; + u32 Tb; + u32 Tc; + u32 Td; + u32 Te; +} __packed; + +/* + * EEPROM map + */ +struct il3945_eeprom { + u8 reserved0[16]; + u16 device_id; /* abs.ofs: 16 */ + u8 reserved1[2]; + u16 pmc; /* abs.ofs: 20 */ + u8 reserved2[20]; + u8 mac_address[6]; /* abs.ofs: 42 */ + u8 reserved3[58]; + u16 board_revision; /* abs.ofs: 106 */ + u8 reserved4[11]; + u8 board_pba_number[9]; /* abs.ofs: 119 */ + u8 reserved5[8]; + u16 version; /* abs.ofs: 136 */ + u8 sku_cap; /* abs.ofs: 138 */ + u8 leds_mode; /* abs.ofs: 139 */ + u16 oem_mode; + u16 wowlan_mode; /* abs.ofs: 142 */ + u16 leds_time_interval; /* abs.ofs: 144 */ + u8 leds_off_time; /* abs.ofs: 146 */ + u8 leds_on_time; /* abs.ofs: 147 */ + u8 almgor_m_version; /* abs.ofs: 148 */ + u8 antenna_switch_type; /* abs.ofs: 149 */ + u8 reserved6[42]; + u8 sku_id[4]; /* abs.ofs: 192 */ + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by 3945 has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. 
+ * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ + u16 band_1_count; /* abs.ofs: 196 */ + struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ + u16 band_2_count; /* abs.ofs: 226 */ + struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ + u16 band_3_count; /* abs.ofs: 254 */ + struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ + u16 band_4_count; /* abs.ofs: 280 */ + struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ + u16 band_5_count; /* abs.ofs: 304 */ + struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ + + u8 reserved9[194]; + +/* + * 3945 Txpower calibration data. + */ +#define IL_NUM_TX_CALIB_GROUPS 5 + struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS]; +/* abs.ofs: 512 */ + struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ + u8 reserved16[172]; /* fill out to full 1024 byte block */ +} __packed; + +#define IL3945_EEPROM_IMG_SIZE 1024 + +/* End of EEPROM */ + +#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ +#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ + +/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ +#define IL39_NUM_QUEUES 5 +#define IL39_CMD_QUEUE_NUM 4 + +#define IL_DEFAULT_TX_RETRY 15 + +/*********************************************/ + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 4 + +#define TFD_CTL_COUNT_SET(n) (n << 24) +#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7) +#define TFD_CTL_PAD_SET(n) (n << 28) +#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ +#define IL39_RTC_INST_LOWER_BOUND (0x000000) +#define IL39_RTC_INST_UPPER_BOUND (0x014000) + +#define IL39_RTC_DATA_LOWER_BOUND (0x800000) +#define IL39_RTC_DATA_UPPER_BOUND (0x808000) + +#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \ + IL39_RTC_INST_LOWER_BOUND) +#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \ + IL39_RTC_DATA_LOWER_BOUND) + +#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE +#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE + +static inline int +il3945_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IL39_RTC_DATA_LOWER_BOUND && + addr < IL39_RTC_DATA_UPPER_BOUND); +} + +/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE + * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */ +struct il3945_shared { + __le32 tx_base_ptr[8]; +} __packed; + +/************************************/ +/* iwl3945 Flow Handler Definitions */ +/************************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. 
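+ *
+ * As a worked example of the layout encoded by the macros below:
+ * FH39_RCSR_TBL = 0x0800 + 0x400 = 0x0C00, so FH39_RCSR_CONFIG(0)
+ * resolves to offset 0x0C00 (the register il3945_hw_rxq_stop() writes
+ * to), and FH39_TSSR_CBB_BASE = 0x0800 + 0x680 = 0x0E80.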
+ */ +#define FH39_MEM_LOWER_BOUND (0x0800) +#define FH39_MEM_UPPER_BOUND (0x1000) + +#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140) +#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180) +#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400) +#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0) +#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500) +#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680) + +/* TFDB (Transmit Frame Buffer Descriptor) */ +#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \ + ((_ch) * 2 + (buf)) * 0x28) +#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch)) + +/* CBCC channel is [0,2] */ +#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8) +#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00) +#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04) + +/* RCSR channel is [0,2] */ +#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40) +#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00) +#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04) +#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20) +#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24) + +#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0)) + +/* RSSR */ +#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000) +#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004) + +/* TCSR */ +#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20) +#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00) +#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04) +#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08) + +/* TSSR */ +#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000) +#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008) +#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010) + +/* DBM */ + +#define FH39_SRVC_CHNL (6) + +#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) +#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) + +#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24) +#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) 
(BIT(_ch) << 16) + +#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \ + (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \ + FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)) + +#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +struct il3945_tfd_tb { + __le32 addr; + __le32 len; +} __packed; + +struct il3945_tfd { + __le32 control_flags; + struct il3945_tfd_tb tbs[4]; + u8 __pad[28]; +} __packed; + +#ifdef CONFIG_IWLEGACY_DEBUGFS +ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t il3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos); +#endif + +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c index 162d877e686..d3248e3ef23 100644 --- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c +++ b/drivers/net/wireless/iwlegacy/4965-calib.c @@ -63,15 +63,14 @@ #include <linux/slab.h> #include <net/mac80211.h> -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-4965-calib.h" +#include "common.h" +#include "4965.h" /***************************************************************************** * INIT calibrations framework *****************************************************************************/ -struct statistics_general_data { +struct stats_general_data { u32 beacon_silence_rssi_a; u32 beacon_silence_rssi_b; u32 beacon_silence_rssi_c; @@ -80,14 +79,15 @@ struct statistics_general_data { u32 beacon_energy_c; }; -void iwl4965_calib_free_results(struct iwl_priv *priv) +void +il4965_calib_free_results(struct il_priv *il) { int i; - for (i = 0; i < IWL_CALIB_MAX; i++) { - kfree(priv->calib_results[i].buf); - priv->calib_results[i].buf = NULL; - priv->calib_results[i].buf_len = 0; + for (i = 0; i < IL_CALIB_MAX; i++) { + kfree(il->calib_results[i].buf); + il->calib_results[i].buf = NULL; + il->calib_results[i].buf_len = 0; } } @@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv) * enough to receive all of our own network traffic, but not so * high that our DSP gets too busy trying to lock onto non-network * activity/noise. */ -static int iwl4965_sens_energy_cck(struct iwl_priv *priv, - u32 norm_fa, - u32 rx_enable_time, - struct statistics_general_data *rx_info) +static int +il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, + struct stats_general_data *rx_info) { u32 max_nrg_cck = 0; int i = 0; @@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, u32 false_alarms = norm_fa * 200 * 1024; u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; - struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + struct il_sensitivity_data *data = NULL; + const struct il_sensitivity_ranges *ranges = il->hw_params.sens; - data = &(priv->sensitivity_data); + data = &(il->sensitivity_data); data->nrg_auto_corr_silence_diff = 0; /* Find max silence rssi among all 3 receivers. 
* This is background noise, which may include transmissions from other * networks, measured during silence before our network's beacon */ - silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & - ALL_BAND_FILTER) >> 8); - silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & - ALL_BAND_FILTER) >> 8); - silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & - ALL_BAND_FILTER) >> 8); + silence_rssi_a = + (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8); + silence_rssi_b = + (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8); + silence_rssi_c = + (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8); val = max(silence_rssi_b, silence_rssi_c); max_silence_rssi = max(silence_rssi_a, (u8) val); @@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, val = data->nrg_silence_rssi[i]; silence_ref = max(silence_ref, val); } - IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", - silence_rssi_a, silence_rssi_b, silence_rssi_c, - silence_ref); + D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a, + silence_rssi_b, silence_rssi_c, silence_ref); /* Find max rx energy (min value!) among all 3 receivers, * measured during beacon frame. @@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); max_nrg_cck += 6; - IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", - rx_info->beacon_energy_a, rx_info->beacon_energy_b, - rx_info->beacon_energy_c, max_nrg_cck - 6); + D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); /* Count number of consecutive beacons with fewer-than-desired * false alarms. */ @@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, data->num_in_cck_no_fa++; else data->num_in_cck_no_fa = 0; - IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", - data->num_in_cck_no_fa); + D_CALIB("consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); /* If we got too many false alarms this time, reduce sensitivity */ - if ((false_alarms > max_false_alarms) && - (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { - IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", - false_alarms, max_false_alarms); - IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); - data->nrg_curr_state = IWL_FA_TOO_MANY; + if (false_alarms > max_false_alarms && + data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) { + D_CALIB("norm FA %u > max FA %u\n", false_alarms, + max_false_alarms); + D_CALIB("... 
reducing sensitivity\n"); + data->nrg_curr_state = IL_FA_TOO_MANY; /* Store for "fewer than desired" on later beacon */ data->nrg_silence_ref = silence_ref; /* increase energy threshold (reduce nrg value) * to decrease sensitivity */ data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; - /* Else if we got fewer than desired, increase sensitivity */ + /* Else if we got fewer than desired, increase sensitivity */ } else if (false_alarms < min_false_alarms) { - data->nrg_curr_state = IWL_FA_TOO_FEW; + data->nrg_curr_state = IL_FA_TOO_FEW; /* Compare silence level with silence level for most recent * healthy number or too many false alarms */ - data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - - (s32)silence_ref; + data->nrg_auto_corr_silence_diff = + (s32) data->nrg_silence_ref - (s32) silence_ref; - IWL_DEBUG_CALIB(priv, - "norm FA %u < min FA %u, silence diff %d\n", - false_alarms, min_false_alarms, - data->nrg_auto_corr_silence_diff); + D_CALIB("norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); /* Increase value to increase sensitivity, but only if: * 1a) previous beacon did *not* have *too many* false alarms @@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, * from a previous beacon with too many, or healthy # FAs * OR 2) We've seen a lot of beacons (100) with too few * false alarms */ - if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && - ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || - (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + if (data->nrg_prev_state != IL_FA_TOO_MANY && + (data->nrg_auto_corr_silence_diff > NRG_DIFF || + data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) { - IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); + D_CALIB("... increasing sensitivity\n"); /* Increase nrg value to increase sensitivity */ val = data->nrg_th_cck + NRG_STEP_CCK; - data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); + data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val); } else { - IWL_DEBUG_CALIB(priv, - "... but not changing sensitivity\n"); + D_CALIB("... but not changing sensitivity\n"); } - /* Else we got a healthy number of false alarms, keep status quo */ + /* Else we got a healthy number of false alarms, keep status quo */ } else { - IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); - data->nrg_curr_state = IWL_FA_GOOD_RANGE; + D_CALIB(" FA in safe zone\n"); + data->nrg_curr_state = IL_FA_GOOD_RANGE; /* Store for use in "fewer than desired" with later beacon */ data->nrg_silence_ref = silence_ref; @@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, /* If previous beacon had too many false alarms, * give it some extra margin by reducing sensitivity again * (but don't go below measured energy of desired Rx) */ - if (IWL_FA_TOO_MANY == data->nrg_prev_state) { - IWL_DEBUG_CALIB(priv, "... increasing margin\n"); + if (IL_FA_TOO_MANY == data->nrg_prev_state) { + D_CALIB("... increasing margin\n"); if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) data->nrg_th_cck -= NRG_MARGIN; else @@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, * Lower value is higher energy, so we use max()! 
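/*
 * Illustrative sketch (not part of the patch): the false-alarm driven
 * threshold stepping that il4965_sens_energy_cck() performs above, reduced
 * to its control flow.  The SKETCH_* constants are placeholders; the real
 * bounds are MIN_FA_CCK/MAX_FA_CCK and NRG_STEP_CCK from the driver headers.
 */
enum sketch_fa_state { SKETCH_FA_TOO_MANY, SKETCH_FA_TOO_FEW, SKETCH_FA_GOOD_RANGE };

#define SKETCH_MIN_FA_CCK	5	/* placeholder: min false alarms per usec of rx time */
#define SKETCH_MAX_FA_CCK	50	/* placeholder: max false alarms per usec of rx time */
#define SKETCH_NRG_STEP_CCK	2	/* placeholder step size */

static enum sketch_fa_state
sketch_adjust_nrg_th_cck(u32 norm_fa, u32 rx_enable_time, u32 *nrg_th_cck)
{
	u32 false_alarms = norm_fa * 200 * 1024;	/* same scaling as the hunk */
	u32 max_fa = SKETCH_MAX_FA_CCK * rx_enable_time;
	u32 min_fa = SKETCH_MIN_FA_CCK * rx_enable_time;

	if (false_alarms > max_fa) {
		/* Too many false alarms: a *smaller* nrg value means a *higher*
		 * energy threshold, i.e. reduced sensitivity. */
		if (*nrg_th_cck >= SKETCH_NRG_STEP_CCK)
			*nrg_th_cck -= SKETCH_NRG_STEP_CCK;
		return SKETCH_FA_TOO_MANY;
	}
	if (false_alarms < min_fa) {
		/* Too few: raise the nrg value to regain sensitivity (the real
		 * code additionally gates this on the silence-level history). */
		*nrg_th_cck += SKETCH_NRG_STEP_CCK;
		return SKETCH_FA_TOO_FEW;
	}
	return SKETCH_FA_GOOD_RANGE;			/* healthy range: keep status quo */
}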
*/ data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); - IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); + D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck); data->nrg_prev_state = data->nrg_curr_state; @@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv, else { val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; data->auto_corr_cck = - min((u32)ranges->auto_corr_max_cck, val); + min((u32) ranges->auto_corr_max_cck, val); } val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; data->auto_corr_cck_mrc = - min((u32)ranges->auto_corr_max_cck_mrc, val); - } else if ((false_alarms < min_false_alarms) && - ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || - (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + min((u32) ranges->auto_corr_max_cck_mrc, val); + } else if (false_alarms < min_false_alarms && + (data->nrg_auto_corr_silence_diff > NRG_DIFF || + data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) { /* Decrease auto_corr values to increase sensitivity */ val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; - data->auto_corr_cck = - max((u32)ranges->auto_corr_min_cck, val); + data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val); val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; data->auto_corr_cck_mrc = - max((u32)ranges->auto_corr_min_cck_mrc, val); + max((u32) ranges->auto_corr_min_cck_mrc, val); } return 0; } - -static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, - u32 norm_fa, - u32 rx_enable_time) +static int +il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time) { u32 val; u32 false_alarms = norm_fa * 200 * 1024; u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; - struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + struct il_sensitivity_data *data = NULL; + const struct il_sensitivity_ranges *ranges = il->hw_params.sens; - data = &(priv->sensitivity_data); + data = &(il->sensitivity_data); /* If we got too many false alarms this time, reduce sensitivity */ if (false_alarms > max_false_alarms) { - IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", - false_alarms, max_false_alarms); + D_CALIB("norm FA %u > max FA %u)\n", false_alarms, + max_false_alarms); val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm = - min((u32)ranges->auto_corr_max_ofdm, val); + min((u32) ranges->auto_corr_max_ofdm, val); val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc = - min((u32)ranges->auto_corr_max_ofdm_mrc, val); + min((u32) ranges->auto_corr_max_ofdm_mrc, val); val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_x1 = - min((u32)ranges->auto_corr_max_ofdm_x1, val); + min((u32) ranges->auto_corr_max_ofdm_x1, val); val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc_x1 = - min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); + min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val); } /* Else if we got fewer than desired, increase sensitivity */ else if (false_alarms < min_false_alarms) { - IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", - false_alarms, min_false_alarms); + D_CALIB("norm FA %u < min FA %u\n", false_alarms, + min_false_alarms); val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm = - max((u32)ranges->auto_corr_min_ofdm, val); + max((u32) ranges->auto_corr_min_ofdm, val); val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc = - 
max((u32)ranges->auto_corr_min_ofdm_mrc, val); + max((u32) ranges->auto_corr_min_ofdm_mrc, val); val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_x1 = - max((u32)ranges->auto_corr_min_ofdm_x1, val); + max((u32) ranges->auto_corr_min_ofdm_x1, val); val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc_x1 = - max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); + max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val); } else { - IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", - min_false_alarms, false_alarms, max_false_alarms); + D_CALIB("min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); } return 0; } -static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, - struct iwl_sensitivity_data *data, - __le16 *tbl) +static void +il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il, + struct il_sensitivity_data *data, + __le16 *tbl) { - tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm); - tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm_mrc); - tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm_x1); - tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); - - tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = - cpu_to_le16((u16)data->auto_corr_cck); - tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16((u16)data->auto_corr_cck_mrc); - - tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = - cpu_to_le16((u16)data->nrg_th_cck); - tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = - cpu_to_le16((u16)data->nrg_th_ofdm); - - tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = - cpu_to_le16(data->barker_corr_th_min); - tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16(data->barker_corr_th_min_mrc); - tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = - cpu_to_le16(data->nrg_th_cca); - - IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", - data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, - data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, - data->nrg_th_ofdm); - - IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", - data->auto_corr_cck, data->auto_corr_cck_mrc, - data->nrg_th_cck); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] = + cpu_to_le16((u16) data->auto_corr_ofdm); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = + cpu_to_le16((u16) data->auto_corr_ofdm_mrc); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] = + cpu_to_le16((u16) data->auto_corr_ofdm_x1); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = + cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1); + + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = + cpu_to_le16((u16) data->auto_corr_cck); + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = + cpu_to_le16((u16) data->auto_corr_cck_mrc); + + tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck); + tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm); + + tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] = + cpu_to_le16(data->barker_corr_th_min); + tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] = + cpu_to_le16(data->barker_corr_th_min_mrc); + tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca); + + D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck, + data->auto_corr_cck_mrc, data->nrg_th_cck); } -/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ -static int 
iwl4965_sensitivity_write(struct iwl_priv *priv) +/* Prepare a C_SENSITIVITY, send to uCode if values have changed */ +static int +il4965_sensitivity_write(struct il_priv *il) { - struct iwl_sensitivity_cmd cmd; - struct iwl_sensitivity_data *data = NULL; - struct iwl_host_cmd cmd_out = { - .id = SENSITIVITY_CMD, - .len = sizeof(struct iwl_sensitivity_cmd), + struct il_sensitivity_cmd cmd; + struct il_sensitivity_data *data = NULL; + struct il_host_cmd cmd_out = { + .id = C_SENSITIVITY, + .len = sizeof(struct il_sensitivity_cmd), .flags = CMD_ASYNC, .data = &cmd, }; - data = &(priv->sensitivity_data); + data = &(il->sensitivity_data); memset(&cmd, 0, sizeof(cmd)); - iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); + il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]); /* Update uCode's "work" table, and copy it to DSP */ - cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL; /* Don't send command to uCode if nothing has changed */ - if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), - sizeof(u16)*HD_TABLE_SIZE)) { - IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + if (!memcmp + (&cmd.table[0], &(il->sensitivity_tbl[0]), + sizeof(u16) * HD_TBL_SIZE)) { + D_CALIB("No change in C_SENSITIVITY\n"); return 0; } /* Copy table for comparison next time */ - memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), - sizeof(u16)*HD_TABLE_SIZE); + memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16) * HD_TBL_SIZE); - return iwl_legacy_send_cmd(priv, &cmd_out); + return il_send_cmd(il, &cmd_out); } -void iwl4965_init_sensitivity(struct iwl_priv *priv) +void +il4965_init_sensitivity(struct il_priv *il) { int ret = 0; int i; - struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + struct il_sensitivity_data *data = NULL; + const struct il_sensitivity_ranges *ranges = il->hw_params.sens; - if (priv->disable_sens_cal) + if (il->disable_sens_cal) return; - IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); + D_CALIB("Start il4965_init_sensitivity\n"); /* Clear driver's sensitivity algo data */ - data = &(priv->sensitivity_data); + data = &(il->sensitivity_data); if (ranges == NULL) return; - memset(data, 0, sizeof(struct iwl_sensitivity_data)); + memset(data, 0, sizeof(struct il_sensitivity_data)); data->num_in_cck_no_fa = 0; - data->nrg_curr_state = IWL_FA_TOO_MANY; - data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_curr_state = IL_FA_TOO_MANY; + data->nrg_prev_state = IL_FA_TOO_MANY; data->nrg_silence_ref = 0; data->nrg_silence_idx = 0; data->nrg_energy_idx = 0; @@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv) for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) data->nrg_silence_rssi[i] = 0; - data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; + data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; - data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; + data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; @@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv) data->last_bad_plcp_cnt_cck = 0; data->last_fa_cnt_cck = 0; - ret |= iwl4965_sensitivity_write(priv); - IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); + ret |= il4965_sensitivity_write(il); + D_CALIB("<<return 0x%X\n", 
ret); } -void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) +void +il4965_sensitivity_calibration(struct il_priv *il, void *resp) { u32 rx_enable_time; u32 fa_cck; @@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) u32 bad_plcp_ofdm; u32 norm_fa_ofdm; u32 norm_fa_cck; - struct iwl_sensitivity_data *data = NULL; - struct statistics_rx_non_phy *rx_info; - struct statistics_rx_phy *ofdm, *cck; + struct il_sensitivity_data *data = NULL; + struct stats_rx_non_phy *rx_info; + struct stats_rx_phy *ofdm, *cck; unsigned long flags; - struct statistics_general_data statis; + struct stats_general_data statis; - if (priv->disable_sens_cal) + if (il->disable_sens_cal) return; - data = &(priv->sensitivity_data); + data = &(il->sensitivity_data); - if (!iwl_legacy_is_any_associated(priv)) { - IWL_DEBUG_CALIB(priv, "<< - not associated\n"); + if (!il_is_any_associated(il)) { + D_CALIB("<< - not associated\n"); return; } - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&il->lock, flags); - rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); - ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); - cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); + rx_info = &(((struct il_notif_stats *)resp)->rx.general); + ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm); + cck = &(((struct il_notif_stats *)resp)->rx.cck); if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { - IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); - spin_unlock_irqrestore(&priv->lock, flags); + D_CALIB("<< invalid data.\n"); + spin_unlock_irqrestore(&il->lock, flags); return; } @@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); statis.beacon_silence_rssi_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a); + le32_to_cpu(rx_info->beacon_silence_rssi_a); statis.beacon_silence_rssi_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b); + le32_to_cpu(rx_info->beacon_silence_rssi_b); statis.beacon_silence_rssi_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c); - statis.beacon_energy_a = - le32_to_cpu(rx_info->beacon_energy_a); - statis.beacon_energy_b = - le32_to_cpu(rx_info->beacon_energy_b); - statis.beacon_energy_c = - le32_to_cpu(rx_info->beacon_energy_c); + le32_to_cpu(rx_info->beacon_silence_rssi_c); + statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a); + statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b); + statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&il->lock, flags); - IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); + D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time); if (!rx_enable_time) { - IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); + D_CALIB("<< RX Enable Time == 0!\n"); return; } - /* These statistics increase monotonically, and do not reset + /* These stats increase monotonically, and do not reset * at each beacon. Calculate difference from last value, or just - * use the new statistics value if it has reset or wrapped around. */ + * use the new stats value if it has reset or wrapped around. 
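/*
 * Illustrative sketch (not part of the patch): the delta-vs-reset handling
 * described in the comment above, pulled out as a tiny helper.  If the uCode
 * counter went backwards (firmware restart or wrap), the new absolute value
 * becomes the baseline and is used as this interval's count, rather than
 * producing a huge bogus difference.
 */
static u32 sketch_counter_delta(u32 *last, u32 now)
{
	u32 delta = (now < *last) ? now : now - *last;

	*last = now;
	return delta;
}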
*/ if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) data->last_bad_plcp_cnt_cck = bad_plcp_cck; else { @@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; norm_fa_cck = fa_cck + bad_plcp_cck; - IWL_DEBUG_CALIB(priv, - "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, - bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); - iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); - iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time); + il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis); - iwl4965_sensitivity_write(priv); + il4965_sensitivity_write(il); } -static inline u8 iwl4965_find_first_chain(u8 mask) +static inline u8 +il4965_find_first_chain(u8 mask) { if (mask & ANT_A) return CHAIN_A; @@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask) * disconnected. */ static void -iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, - struct iwl_chain_noise_data *data) +il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig, + struct il_chain_noise_data *data) { u32 active_chains = 0; u32 max_average_sig; @@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, u8 first_chain; u16 i = 0; - average_sig[0] = data->chain_signal_a / - priv->cfg->base_params->chain_noise_num_beacons; - average_sig[1] = data->chain_signal_b / - priv->cfg->base_params->chain_noise_num_beacons; - average_sig[2] = data->chain_signal_c / - priv->cfg->base_params->chain_noise_num_beacons; + average_sig[0] = + data->chain_signal_a / + il->cfg->base_params->chain_noise_num_beacons; + average_sig[1] = + data->chain_signal_b / + il->cfg->base_params->chain_noise_num_beacons; + average_sig[2] = + data->chain_signal_c / + il->cfg->base_params->chain_noise_num_beacons; if (average_sig[0] >= average_sig[1]) { max_average_sig = average_sig[0]; @@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, active_chains = (1 << max_average_sig_antenna_i); } - IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", - average_sig[0], average_sig[1], average_sig[2]); - IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", - max_average_sig, max_average_sig_antenna_i); + D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1], + average_sig[2]); + D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig, + max_average_sig_antenna_i); /* Compare signal strengths for all 3 receivers. */ for (i = 0; i < NUM_RX_CHAINS; i++) { @@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, data->disconn_array[i] = 1; else active_chains |= (1 << i); - IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " - "disconn_array[i] = %d\n", - i, rssi_delta, data->disconn_array[i]); + D_CALIB("i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", i, rssi_delta, + data->disconn_array[i]); } } @@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, * To be safe, simply mask out any chains that we know * are not on the device. 
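/*
 * Illustrative sketch (not part of the patch): the disconnected-antenna
 * heuristic condensed from il4965_find_disconn_antenna() above.  Average the
 * accumulated per-chain beacon signal, take the strongest chain as reference,
 * flag chains that are far weaker, and finally mask with the bitmap of
 * antennas the hardware really has.  SKETCH_MAX_PATHLOSS is a placeholder for
 * the driver's threshold constant, which is not visible in this hunk.
 */
#define SKETCH_NUM_RX_CHAINS	3
#define SKETCH_MAX_PATHLOSS	15	/* placeholder, same units as the averages */

static u32 sketch_find_active_chains(const u32 avg_sig[SKETCH_NUM_RX_CHAINS],
				     u32 valid_rx_ant_msk,
				     u8 disconn[SKETCH_NUM_RX_CHAINS])
{
	u32 active = 0;
	int strongest = 0, i;

	for (i = 1; i < SKETCH_NUM_RX_CHAINS; i++)
		if (avg_sig[i] > avg_sig[strongest])
			strongest = i;
	active |= BIT(strongest);

	for (i = 0; i < SKETCH_NUM_RX_CHAINS; i++) {
		if (i == strongest)
			continue;
		if (avg_sig[strongest] - avg_sig[i] > SKETCH_MAX_PATHLOSS)
			disconn[i] = 1;		/* much weaker than the reference: not connected */
		else
			active |= BIT(i);
	}

	return active & valid_rx_ant_msk;	/* never claim chains the device lacks */
}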
*/ - active_chains &= priv->hw_params.valid_rx_ant; + active_chains &= il->hw_params.valid_rx_ant; num_tx_chains = 0; for (i = 0; i < NUM_RX_CHAINS; i++) { /* loops on all the bits of - * priv->hw_setting.valid_tx_ant */ + * il->hw_setting.valid_tx_ant */ u8 ant_msk = (1 << i); - if (!(priv->hw_params.valid_tx_ant & ant_msk)) + if (!(il->hw_params.valid_tx_ant & ant_msk)) continue; num_tx_chains++; if (data->disconn_array[i] == 0) /* there is a Tx antenna connected */ break; - if (num_tx_chains == priv->hw_params.tx_chains_num && + if (num_tx_chains == il->hw_params.tx_chains_num && data->disconn_array[i]) { /* * If all chains are disconnected * connect the first valid tx chain */ first_chain = - iwl4965_find_first_chain(priv->cfg->valid_tx_ant); + il4965_find_first_chain(il->cfg->valid_tx_ant); data->disconn_array[first_chain] = 0; active_chains |= BIT(first_chain); - IWL_DEBUG_CALIB(priv, - "All Tx chains are disconnected W/A - declare %d as connected\n", - first_chain); + D_CALIB("All Tx chains are disconnected" + "- declare %d as connected\n", first_chain); break; } } - if (active_chains != priv->hw_params.valid_rx_ant && - active_chains != priv->chain_noise_data.active_chains) - IWL_DEBUG_CALIB(priv, - "Detected that not all antennas are connected! " - "Connected: %#x, valid: %#x.\n", - active_chains, priv->hw_params.valid_rx_ant); + if (active_chains != il->hw_params.valid_rx_ant && + active_chains != il->chain_noise_data.active_chains) + D_CALIB("Detected that not all antennas are connected! " + "Connected: %#x, valid: %#x.\n", active_chains, + il->hw_params.valid_rx_ant); /* Save for use within RXON, TX, SCAN commands, etc. */ data->active_chains = active_chains; - IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", - active_chains); + D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains); } -static void iwl4965_gain_computation(struct iwl_priv *priv, - u32 *average_noise, - u16 min_average_noise_antenna_i, - u32 min_average_noise, - u8 default_chain) +static void +il4965_gain_computation(struct il_priv *il, u32 * average_noise, + u16 min_average_noise_antenna_i, u32 min_average_noise, + u8 default_chain) { int i, ret; - struct iwl_chain_noise_data *data = &priv->chain_noise_data; + struct il_chain_noise_data *data = &il->chain_noise_data; data->delta_gain_code[min_average_noise_antenna_i] = 0; for (i = default_chain; i < NUM_RX_CHAINS; i++) { s32 delta_g = 0; - if (!(data->disconn_array[i]) && - (data->delta_gain_code[i] == - CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { + if (!data->disconn_array[i] && + data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL) { delta_g = average_noise[i] - min_average_noise; - data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); + data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15); data->delta_gain_code[i] = - min(data->delta_gain_code[i], + min(data->delta_gain_code[i], (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); data->delta_gain_code[i] = - (data->delta_gain_code[i] | (1 << 2)); + (data->delta_gain_code[i] | (1 << 2)); } else { data->delta_gain_code[i] = 0; } } - IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", - data->delta_gain_code[0], - data->delta_gain_code[1], - data->delta_gain_code[2]); + D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0], + data->delta_gain_code[1], data->delta_gain_code[2]); /* Differential gain gets sent to uCode only once */ if (!data->radio_write) { - struct iwl_calib_diff_gain_cmd cmd; + struct il_calib_diff_gain_cmd cmd; data->radio_write = 1; memset(&cmd, 0, sizeof(cmd)); - 
cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD; cmd.diff_gain_a = data->delta_gain_code[0]; cmd.diff_gain_b = data->delta_gain_code[1]; cmd.diff_gain_c = data->delta_gain_code[2]; - ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, - sizeof(cmd), &cmd); + ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd); if (ret) - IWL_DEBUG_CALIB(priv, "fail sending cmd " - "REPLY_PHY_CALIBRATION_CMD\n"); + D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n"); /* TODO we might want recalculate * rx_chain in rxon cmd */ /* Mark so we run this algo only once! */ - data->state = IWL_CHAIN_NOISE_CALIBRATED; + data->state = IL_CHAIN_NOISE_CALIBRATED; } } - - /* - * Accumulate 16 beacons of signal and noise statistics for each of + * Accumulate 16 beacons of signal and noise stats for each of * 3 receivers/antennas/rx-chains, then figure out: * 1) Which antennas are connected. * 2) Differential rx gain settings to balance the 3 receivers. */ -void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) +void +il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp) { - struct iwl_chain_noise_data *data = NULL; + struct il_chain_noise_data *data = NULL; u32 chain_noise_a; u32 chain_noise_b; @@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) u32 chain_sig_a; u32 chain_sig_b; u32 chain_sig_c; - u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; - u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE }; + u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE }; u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; u16 i = 0; @@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) u8 rxon_band24; u8 stat_band24; unsigned long flags; - struct statistics_rx_non_phy *rx_info; + struct stats_rx_non_phy *rx_info; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct il_rxon_context *ctx = &il->ctx; - if (priv->disable_chain_noise_cal) + if (il->disable_chain_noise_cal) return; - data = &(priv->chain_noise_data); + data = &(il->chain_noise_data); /* * Accumulate just the first "chain_noise_num_beacons" after * the first association, then we're done forever. 
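/*
 * Illustrative sketch (not part of the patch): the one-shot accumulation
 * window that gates il4965_chain_noise_calibration() above.  State names
 * mirror the IL_CHAIN_NOISE_* values used in the hunk; num_beacons stands in
 * for cfg->base_params->chain_noise_num_beacons.
 */
enum sketch_chain_noise_state {
	SKETCH_CHAIN_NOISE_ALIVE,	/* waiting for the calib reset after association */
	SKETCH_CHAIN_NOISE_ACCUMULATE,	/* summing per-chain noise/signal on each beacon */
	SKETCH_CHAIN_NOISE_DONE,	/* gains programmed; never run again */
};

struct sketch_chain_noise {
	enum sketch_chain_noise_state state;
	u32 beacon_count;
	u32 noise_sum[3];
};

static void sketch_on_beacon_stats(struct sketch_chain_noise *d,
				   const u32 noise[3], u32 num_beacons)
{
	int i;

	if (d->state != SKETCH_CHAIN_NOISE_ACCUMULATE)
		return;				/* only the first window after association counts */

	d->beacon_count++;
	for (i = 0; i < 3; i++)
		d->noise_sum[i] += noise[i];

	if (d->beacon_count != num_beacons)
		return;				/* keep accumulating */

	/* ...average, pick the quietest chain, send differential gains once... */
	d->state = SKETCH_CHAIN_NOISE_DONE;
}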
*/ - if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { - if (data->state == IWL_CHAIN_NOISE_ALIVE) - IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); + if (data->state != IL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IL_CHAIN_NOISE_ALIVE) + D_CALIB("Wait for noise calib reset\n"); return; } - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&il->lock, flags); - rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> - rx.general); + rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general); if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { - IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); - spin_unlock_irqrestore(&priv->lock, flags); + D_CALIB(" << Interference data unavailable\n"); + spin_unlock_irqrestore(&il->lock, flags); return; } rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); rxon_chnum = le16_to_cpu(ctx->staging.channel); - stat_band24 = !!(((struct iwl_notif_statistics *) - stat_resp)->flag & - STATISTICS_REPLY_FLG_BAND_24G_MSK); - stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) - stat_resp)->flag) >> 16; + stat_band24 = + !!(((struct il_notif_stats *)stat_resp)-> + flag & STATS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = + le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16; /* Make sure we accumulate data for just the associated channel * (even if scanning). */ - if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { - IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", - rxon_chnum, rxon_band24); - spin_unlock_irqrestore(&priv->lock, flags); + if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) { + D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum, + rxon_band24); + spin_unlock_irqrestore(&il->lock, flags); return; } /* - * Accumulate beacon statistics values across + * Accumulate beacon stats values across * "chain_noise_num_beacons" */ - chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & - IN_BAND_FILTER; - chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & - IN_BAND_FILTER; - chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & - IN_BAND_FILTER; + chain_noise_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + chain_noise_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + chain_noise_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&il->lock, flags); data->beacon_count++; @@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) data->chain_signal_b = (chain_sig_b + data->chain_signal_b); data->chain_signal_c = (chain_sig_c + data->chain_signal_c); - IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", - rxon_chnum, rxon_band24, data->beacon_count); - IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", - chain_sig_a, chain_sig_b, chain_sig_c); - IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", - chain_noise_a, chain_noise_b, chain_noise_c); + D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24, + data->beacon_count); + D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b, + chain_sig_c); + D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b, + chain_noise_c); /* If this is the "chain_noise_num_beacons", 
determine: * 1) Disconnected antennas (using signal strengths) * 2) Differential gain (using silence noise) to balance receivers */ - if (data->beacon_count != - priv->cfg->base_params->chain_noise_num_beacons) + if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons) return; /* Analyze signal for disconnected antenna */ - iwl4965_find_disconn_antenna(priv, average_sig, data); + il4965_find_disconn_antenna(il, average_sig, data); /* Analyze noise for rx balance */ - average_noise[0] = data->chain_noise_a / - priv->cfg->base_params->chain_noise_num_beacons; - average_noise[1] = data->chain_noise_b / - priv->cfg->base_params->chain_noise_num_beacons; - average_noise[2] = data->chain_noise_c / - priv->cfg->base_params->chain_noise_num_beacons; + average_noise[0] = + data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons; + average_noise[1] = + data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons; + average_noise[2] = + data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons; for (i = 0; i < NUM_RX_CHAINS; i++) { - if (!(data->disconn_array[i]) && - (average_noise[i] <= min_average_noise)) { + if (!data->disconn_array[i] && + average_noise[i] <= min_average_noise) { /* This means that chain i is active and has * lower noise values so far: */ min_average_noise = average_noise[i]; @@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) } } - IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", - average_noise[0], average_noise[1], - average_noise[2]); + D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0], + average_noise[1], average_noise[2]); - IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", - min_average_noise, min_average_noise_antenna_i); + D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise, + min_average_noise_antenna_i); - iwl4965_gain_computation(priv, average_noise, - min_average_noise_antenna_i, min_average_noise, - iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); + il4965_gain_computation(il, average_noise, min_average_noise_antenna_i, + min_average_noise, + il4965_find_first_chain(il->cfg->valid_rx_ant)); /* Some power changes may have been made during the calibration. 
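/*
 * Illustrative sketch (not part of the patch): the per-chain differential
 * gain code produced by il4965_gain_computation() above for a connected,
 * non-reference chain: scale the noise excess over the quietest chain by
 * 10/15, clamp it, and set bit 2, as the hunk does.  SKETCH_MAX_GAIN_CODE is
 * a placeholder for CHAIN_NOISE_MAX_DELTA_GAIN_CODE; in the driver the
 * quietest chain and any disconnected chain simply keep a code of 0.
 */
#define SKETCH_MAX_GAIN_CODE	0xf	/* placeholder clamp */

static u8 sketch_delta_gain_code(u32 avg_noise, u32 min_avg_noise)
{
	u32 delta_g = avg_noise - min_avg_noise;
	u32 code = (delta_g * 10) / 15;

	if (code > SKETCH_MAX_GAIN_CODE)
		code = SKETCH_MAX_GAIN_CODE;

	return (u8)(code | (1 << 2));
}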
* Update and commit the RXON */ - if (priv->cfg->ops->lib->update_chain_flags) - priv->cfg->ops->lib->update_chain_flags(priv); + if (il->cfg->ops->lib->update_chain_flags) + il->cfg->ops->lib->update_chain_flags(il); - data->state = IWL_CHAIN_NOISE_DONE; - iwl_legacy_power_update_mode(priv, false); + data->state = IL_CHAIN_NOISE_DONE; + il_power_update_mode(il, false); } -void iwl4965_reset_run_time_calib(struct iwl_priv *priv) +void +il4965_reset_run_time_calib(struct il_priv *il) { int i; - memset(&(priv->sensitivity_data), 0, - sizeof(struct iwl_sensitivity_data)); - memset(&(priv->chain_noise_data), 0, - sizeof(struct iwl_chain_noise_data)); + memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data)); + memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data)); for (i = 0; i < NUM_RX_CHAINS; i++) - priv->chain_noise_data.delta_gain_code[i] = - CHAIN_NOISE_DELTA_GAIN_INIT_VAL; + il->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; - /* Ask for statistics now, the uCode will send notification + /* Ask for stats now, the uCode will send notification * periodically after association */ - iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); + il_send_stats_request(il, CMD_ASYNC, true); } diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c new file mode 100644 index 00000000000..98ec39f56ba --- /dev/null +++ b/drivers/net/wireless/iwlegacy/4965-debug.c @@ -0,0 +1,746 @@ +/****************************************************************************** +* +* GPL LICENSE SUMMARY +* +* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, +* USA +* +* The full GNU General Public License is included in this distribution +* in the file called LICENSE.GPL. +* +* Contact Information: +* Intel Linux Wireless <ilw@linux.intel.com> +* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +*****************************************************************************/ +#include "common.h" +#include "4965.h" + +static const char *fmt_value = " %-30s %10u\n"; +static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +static const char *fmt_header = + "%-32s current cumulative delta max\n"; + +static int +il4965_stats_flag(struct il_priv *il, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + flag = le32_to_cpu(il->_4965.stats.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : + "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATS_NARROW_BAND_MSK) ? 
"enabled" : + "disabled"); + + return p; +} + +ssize_t +il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0; + char *buf; + int bufsz = + sizeof(struct stats_rx_phy) * 40 + + sizeof(struct stats_rx_non_phy) * 40 + + sizeof(struct stats_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct stats_rx_non_phy *general, *accum_general; + struct stats_rx_non_phy *delta_general, *max_general; + struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!il_is_alive(il)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * the statistic information display here is based on + * the last stats notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &il->_4965.stats.rx.ofdm; + cck = &il->_4965.stats.rx.cck; + general = &il->_4965.stats.rx.general; + ht = &il->_4965.stats.rx.ofdm_ht; + accum_ofdm = &il->_4965.accum_stats.rx.ofdm; + accum_cck = &il->_4965.accum_stats.rx.cck; + accum_general = &il->_4965.accum_stats.rx.general; + accum_ht = &il->_4965.accum_stats.rx.ofdm_ht; + delta_ofdm = &il->_4965.delta_stats.rx.ofdm; + delta_cck = &il->_4965.delta_stats.rx.cck; + delta_general = &il->_4965.delta_stats.rx.general; + delta_ht = &il->_4965.delta_stats.rx.ofdm_ht; + max_ofdm = &il->_4965.max_delta.rx.ofdm; + max_cck = &il->_4965.max_delta.rx.cck; + max_general = &il->_4965.max_delta.rx.general; + max_ht = &il->_4965.max_delta.rx.ofdm_ht; + + pos += il4965_stats_flag(il, buf, bufsz); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - OFDM:"); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", + le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, + delta_ofdm->overrun_err, max_ofdm->overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", + le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, + delta_ofdm->crc32_good, max_ofdm->crc32_good); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, 
"sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, + delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, + delta_ofdm->fina_timeout, max_ofdm->fina_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, + delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, + delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, delta_ofdm->mh_format_err, + max_ofdm->mh_format_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += + scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - CCK:"); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", + le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, + delta_cck->overrun_err, max_cck->overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, max_cck->early_overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, + max_cck->false_alarm_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:", + 
le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, + delta_cck->sfd_timeout, max_cck->sfd_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:", + le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout, + delta_cck->fina_timeout, max_cck->fina_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, + delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, + delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill, + delta_cck->dsp_self_kill, max_cck->dsp_self_kill); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", + le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err, + delta_cck->mh_format_err, max_cck->mh_format_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += + scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - GENERAL:"); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:", + le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, + delta_general->bogus_cts, max_general->bogus_cts); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:", + le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, + delta_general->bogus_ack, max_general->bogus_ack); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:", + le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:", + 
le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, delta_general->channel_load, + max_general->channel_load); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, max_general->beacon_rssi_a); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, max_general->beacon_rssi_b); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, max_general->beacon_rssi_c); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + max_general->beacon_energy_c); + + pos += + scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - OFDM_HT:"); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", + le32_to_cpu(ht->plcp_err), 
accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", + le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct stats_tx) * 48) + 250; + ssize_t ret; + struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!il_is_alive(il)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last stats notification from uCode + * might not reflect the current uCode activity + */ + tx = &il->_4965.stats.tx; + accum_tx = &il->_4965.accum_stats.tx; + delta_tx = &il->_4965.delta_stats.tx; + max_tx = &il->_4965.max_delta.tx; + + pos += il4965_stats_flag(il, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:"); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:", + le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, + max_tx->rx_detected_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), 
accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:", + le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "agg scd_query_mismatch:", + le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + 
accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:", + le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +il4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct stats_general) * 10 + 300; + ssize_t ret; + struct stats_general_common *general, *accum_general; + struct stats_general_common *delta_general, *max_general; + struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct stats_div *div, *accum_div, *delta_div, *max_div; + + if (!il_is_alive(il)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last stats notification from uCode + * might not reflect the current uCode activity + */ + general = &il->_4965.stats.general.common; + dbg = &il->_4965.stats.general.common.dbg; + div = &il->_4965.stats.general.common.div; + accum_general = &il->_4965.accum_stats.general.common; + accum_dbg = &il->_4965.accum_stats.general.common.dbg; + accum_div = &il->_4965.accum_stats.general.common.div; + delta_general = &il->_4965.delta_stats.general.common; + max_general = &il->_4965.max_delta.general.common; + delta_dbg = &il->_4965.delta_stats.general.common.dbg; + max_dbg = &il->_4965.max_delta.general.common.dbg; + delta_div = &il->_4965.delta_stats.general.common.div; + max_div = &il->_4965.max_delta.general.common.div; + + pos += il4965_stats_flag(il, buf, bufsz); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_General:"); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:", + le32_to_cpu(general->temperature)); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:", + le32_to_cpu(general->ttl_timestamp)); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:", + le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:", + le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, + "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, delta_general->sleep_time, + max_general->sleep_time); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:", + le32_to_cpu(general->slots_out), accum_general->slots_out, + delta_general->slots_out, 
max_general->slots_out); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, delta_general->slots_idle, + max_general->slots_idle); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += + scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c new file mode 100644 index 00000000000..1667232af64 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -0,0 +1,6515 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define DRV_NAME "iwl4965" + +#include "common.h" +#include "4965.h" + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* + * module name, copyright, version, etc. + */ +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" + +#ifdef CONFIG_IWLEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +#define DRV_VERSION IWLWIFI_VERSION VD + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("iwl4965"); + +void +il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status) +{ + if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { + IL_ERR("Tx flush command to flush out all frames\n"); + if (!test_bit(S_EXIT_PENDING, &il->status)) + queue_work(il->workqueue, &il->tx_flush); + } +} + +/* + * EEPROM + */ +struct il_mod_params il4965_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +void +il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __il_free_pages(il, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + for (i = 0; i < RX_QUEUE_SIZE; i++) + rxq->queue[i] = NULL; + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +int +il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq) +{ + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + u32 rb_timeout = 0; + + if (il->cfg->mod_params->amsdu_size_8K) + rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + /* Reset driver's Rx queue write idx */ + il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 
4); + + /* Enable Rx DMA + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, + FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | + rb_size | + (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | + (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF); + + return 0; +} + +static void +il4965_set_pwr_vmain(struct il_priv *il) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do: + + if (pci_pme_capable(il->pci_dev, PCI_D3cold)) + il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + */ + + il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +int +il4965_hw_nic_init(struct il_priv *il) +{ + unsigned long flags; + struct il_rx_queue *rxq = &il->rxq; + int ret; + + /* nic_init */ + spin_lock_irqsave(&il->lock, flags); + il->cfg->ops->lib->apm_ops.init(il); + + /* Set interrupt coalescing calibration timer to default (512 usecs) */ + il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF); + + spin_unlock_irqrestore(&il->lock, flags); + + il4965_set_pwr_vmain(il); + + il->cfg->ops->lib->apm_ops.config(il); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + ret = il_rx_queue_alloc(il); + if (ret) { + IL_ERR("Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + il4965_rx_queue_reset(il, rxq); + + il4965_rx_replenish(il); + + il4965_rx_init(il, rxq); + + spin_lock_irqsave(&il->lock, flags); + + rxq->need_update = 1; + il_rx_queue_update_write_ptr(il, rxq); + + spin_unlock_irqrestore(&il->lock, flags); + + /* Allocate or reset and init all Tx and Command queues */ + if (!il->txq) { + ret = il4965_txq_ctx_alloc(il); + if (ret) + return ret; + } else + il4965_txq_ctx_reset(il); + + set_bit(S_INIT, &il->status); + + return 0; +} + +/** + * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 +il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr) +{ + return cpu_to_le32((u32) (dma_addr >> 8)); +} + +/** + * il4965_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' idx forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
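An illustrative stand-alone sketch (not part of the patch) of the invariant behind il4965_dma_addr2rbd_ptr() just above: receive pages are 256-byte aligned and their DMA addresses fit in 36 bits (the BUG_ON checks in the allocation path below enforce exactly this), so shifting right by 8 loses nothing and the result fits a 32-bit RBD entry.

#include <assert.h>
#include <stdint.h>

static uint32_t dma_addr_to_rbd_ptr(uint64_t dma_addr)
{
	assert((dma_addr & 0xffULL) == 0);	/* 256-byte aligned */
	assert((dma_addr >> 36) == 0);		/* fits in 36 bits */
	return (uint32_t)(dma_addr >> 8);	/* 28 significant bits remain */
}

For example, 0xfedcba800 maps to 0x0fedcba8; a misaligned or wider address trips the checks instead of silently corrupting the ring.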
+ */ +void +il4965_rx_queue_restock(struct il_priv *il) +{ + struct il_rx_queue *rxq = &il->rxq; + struct list_head *element; + struct il_rx_buf *rxb; + unsigned long flags; + + spin_lock_irqsave(&rxq->lock, flags); + while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct il_rx_buf, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = + il4965_dma_addr2rbd_ptr(il, rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(il->workqueue, &il->rx_replenish); + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + il_rx_queue_update_write_ptr(il, rxq); + } +} + +/** + * il4965_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via il_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +static void +il4965_rx_allocate(struct il_priv *il, gfp_t priority) +{ + struct il_rx_queue *rxq = &il->rxq; + struct list_head *element; + struct il_rx_buf *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (il->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + D_INFO("alloc_pages failed, " "order: %d\n", + il->hw_params.rx_page_order); + + if (rxq->free_count <= RX_LOW_WATERMARK && + net_ratelimit()) + IL_ERR("Failed to alloc_pages with %s. " + "Only %u free buffers remaining.\n", + priority == + GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, il->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct il_rx_buf, list); + list_del(element); + + spin_unlock_irqrestore(&rxq->lock, flags); + + BUG_ON(rxb->page); + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = + pci_map_page(il->pci_dev, page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! 
*/ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + il->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void +il4965_rx_replenish(struct il_priv *il) +{ + unsigned long flags; + + il4965_rx_allocate(il, GFP_KERNEL); + + spin_lock_irqsave(&il->lock, flags); + il4965_rx_queue_restock(il); + spin_unlock_irqrestore(&il->lock, flags); +} + +void +il4965_rx_replenish_now(struct il_priv *il) +{ + il4965_rx_allocate(il, GFP_ATOMIC); + + il4965_rx_queue_restock(il); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void +il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __il_free_pages(il, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + +int +il4965_rxq_stop(struct il_priv *il) +{ + + /* stop Rx DMA */ + il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0); + il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG, + FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + + return 0; +} + +int +il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +{ + int idx = 0; + int band_offset = 0; + + /* HT rate format: mac80211 wants an MCS number, which is just LSB */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + return idx; + /* Legacy rate format, search for match in table */ + } else { + if (band == IEEE80211_BAND_5GHZ) + band_offset = IL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++) + if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx - band_offset; + } + + return -1; +} + +static int +il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. */ + struct il4965_rx_non_cfg_phy *ncphy = + (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 agc = + (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >> + IL49_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK) + >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
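A stand-alone restatement (not from the patch) of the conversion described in the comment above: take the strongest DSP reading among the chains flagged valid, then subtract the AGC gain and a fixed calibration offset. The offset is passed in as a parameter here because its exact value lives in the driver headers (IL4965_RSSI_OFFSET).

static int max_rssi_to_dbm(const unsigned char *rssi_info, unsigned valid_antennae,
			   int agc, int rssi_offset)
{
	int max_rssi = 0;
	unsigned i;

	for (i = 0; i < 3; i++)		/* three receiver chains, readings at even offsets */
		if ((valid_antennae & (1u << i)) && rssi_info[i << 1] > max_rssi)
			max_rssi = rssi_info[i << 1];

	return max_rssi - agc - rssi_offset;	/* higher AGC (more gain) => weaker signal */
}

Raising the AGC reading by 10 dB lowers the reported signal by 10 dB, which is the point the comment makes.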
*/ + return max_rssi - agc - IL4965_RSSI_OFFSET; +} + +static u32 +il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= + (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + } + + D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out); + + return decrypt_out; +} + +static void +il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr, + u16 len, u32 ampdu_status, struct il_rx_buf *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We only process data packets if the interface is open */ + if (unlikely(!il->is_open)) { + D_DROP("Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!il->cfg->mod_params->sw_crypto && + il_set_decrypted_flag(il, hdr, ampdu_status, stats)) + return; + + skb = dev_alloc_skb(128); + if (!skb) { + IL_ERR("dev_alloc_skb failed\n"); + return; + } + + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); + + il_update_stats(il, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(il->hw, skb); + il->alloc_rxb_page--; + rxb->page = NULL; +} + +/* Called for N_RX (legacy ABG frames), or + * N_RX_MPDU (HT high-throughput N frames). */ +void +il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct il_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + /** + * N_RX and N_RX_MPDU are handled differently. + * N_RX: physical layer info is in this buffer + * N_RX_MPDU: physical layer info was sent in separate + * command and cached in il->last_phy_res + * + * Here we set up local variables depending on which command is + * received. 
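The two cases the comment above distinguishes differ only in where the pieces sit inside pkt->u.raw. For N_RX the order is: the phy-res structure, then cfg_phy_cnt bytes of PHY data, then the 802.11 frame of byte_count bytes, then a trailing 32-bit status word. A small sketch (not from the patch; structure sizes passed in, since they are driver types) of the offset arithmetic the branch below performs:

#include <stddef.h>

struct n_rx_layout {
	size_t header_off;	/* start of the 802.11 header */
	size_t status_off;	/* start of the trailing rx_pkt_status word */
};

static struct n_rx_layout n_rx_offsets(size_t phy_res_size, size_t cfg_phy_cnt,
				       size_t byte_count)
{
	struct n_rx_layout l;

	l.header_off = phy_res_size + cfg_phy_cnt;
	l.status_off = l.header_off + byte_count;
	return l;
}

For N_RX_MPDU the phy-res part is absent (it arrived earlier and was cached in last_phy_res), so the header instead follows the il_rx_mpdu_res_start header.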
+ */ + if (pkt->hdr.cmd == N_RX) { + phy_res = (struct il_rx_phy_res *)pkt->u.raw; + header = + (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt); + + len = le16_to_cpu(phy_res->byte_count); + rx_pkt_status = + *(__le32 *) (pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt + len); + ampdu_status = le32_to_cpu(rx_pkt_status); + } else { + if (!il->_4965.last_phy_res_valid) { + IL_ERR("MPDU frame without cached PHY data\n"); + return; + } + phy_res = &il->_4965.last_phy_res; + amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len); + ampdu_status = + il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status)); + } + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + D_DROP("dsp size out of range [0,20]: %d/n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.band = + (phy_res-> + phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), + rx_status.band); + rx_status.rate_idx = + il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */ + + il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = il4965_calc_rssi(il, phy_res); + + il_dbg_log_rx_data_frame(il, len, header); + D_STATS("Rssi %d, TSF %llu\n", rx_status.signal, + (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. + * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. + */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> + RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + + il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb, + &rx_status); +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY). + * This will be used later in il_hdl_rx() for N_RX_MPDU. 
*/ +void +il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + il->_4965.last_phy_res_valid = true; + memcpy(&il->_4965.last_phy_res, pkt->u.raw, + sizeof(struct il_rx_phy_res)); +} + +static int +il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif, + enum ieee80211_band band, u8 is_active, + u8 n_probes, struct il_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct il_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + u16 channel; + + sband = il_get_hw_mode(il, band); + if (!sband) + return 0; + + active_dwell = il_get_active_dwell_time(il, band, n_probes); + passive_dwell = il_get_passive_dwell_time(il, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { + chan = il->scan_request->channels[i]; + + if (chan->band != band) + continue; + + channel = chan->hw_value; + scan_ch->channel = cpu_to_le16(channel); + + ch_info = il_get_channel_info(il, band, channel); + if (!il_is_channel_valid(ch_info)) { + D_SCAN("Channel %d is INVALID for this band.\n", + channel); + continue; + } + + if (!is_active || il_is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + + if (n_probes) + scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + + D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel, + le32_to_cpu(scan_ch->type), + (scan_ch-> + type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE", + (scan_ch-> + type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell : + passive_dwell); + + scan_ch++; + added++; + } + + D_SCAN("total channels to scan %d\n", added); + return added; +} + +static void +il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid) +{ + int i; + u8 ind = *ant; + + for (i = 0; i < RATE_ANT_NUM - 1; i++) { + ind = (ind + 1) < RATE_ANT_NUM ? 
ind + 1 : 0; + if (valid & BIT(ind)) { + *ant = ind; + return; + } + } +} + +int +il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif) +{ + struct il_host_cmd cmd = { + .id = C_SCAN, + .len = sizeof(struct il_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct il_scan_cmd *scan; + struct il_rxon_context *ctx = &il->ctx; + u32 rate_flags = 0; + u16 cmd_len; + u16 rx_chain = 0; + enum ieee80211_band band; + u8 n_probes = 0; + u8 rx_ant = il->hw_params.valid_rx_ant; + u8 rate; + bool is_active = false; + int chan_mod; + u8 active_chains; + u8 scan_tx_antennas = il->hw_params.valid_tx_ant; + int ret; + + lockdep_assert_held(&il->mutex); + + ctx = il_rxon_ctx_from_vif(vif); + + if (!il->scan_cmd) { + il->scan_cmd = + kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE, + GFP_KERNEL); + if (!il->scan_cmd) { + D_SCAN("fail to allocate memory for scan\n"); + return -ENOMEM; + } + } + scan = il->scan_cmd; + memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; + scan->quiet_time = IL_ACTIVE_QUIET_TIME; + + if (il_is_any_associated(il)) { + u16 interval; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + D_INFO("Scanning while associated...\n"); + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = + (extra | ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + D_SCAN("suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (il->scan_request->n_ssids) { + int i, p = 0; + D_SCAN("Kicking off active scan\n"); + for (i = 0; i < il->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!il->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + il->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + il->scan_request->ssids[i].ssid, + il->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + D_SCAN("Start passive scan.\n"); + + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = ctx->bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + switch (il->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + chan_mod = + le32_to_cpu(il->ctx.active. + flags & RXON_FLG_CHANNEL_MODE_MSK) >> + RXON_FLG_CHANNEL_MODE_POS; + if (chan_mod == CHANNEL_MODE_PURE_40) { + rate = RATE_6M_PLCP; + } else { + rate = RATE_1M_PLCP; + rate_flags = RATE_MCS_CCK_MSK; + } + break; + case IEEE80211_BAND_5GHZ: + rate = RATE_6M_PLCP; + break; + default: + IL_WARN("Invalid scan band\n"); + return -EIO; + } + + /* + * If active scanning is requested but a certain channel is + * marked passive, we can do active scanning if we detect + * transmissions. + * + * There is an issue with some firmware versions that triggers + * a sysassert on a "good CRC threshold" of zero (== disabled), + * on a radar channel even though this means that we should NOT + * send probes. + * + * The "good CRC threshold" is the number of frames that we + * need to receive during our dwell time on a channel before + * sending out probes -- setting this to a huge value will + * mean we never reach it, but at the same time work around + * the aforementioned issue. 
Thus use IL_GOOD_CRC_TH_NEVER + * here instead of IL_GOOD_CRC_TH_DISABLED. + */ + scan->good_CRC_th = + is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER; + + band = il->scan_band; + + if (il->cfg->scan_rx_antennas[band]) + rx_ant = il->cfg->scan_rx_antennas[band]; + + il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas); + rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS; + scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags); + + /* In power save mode use one chain, otherwise use all chains */ + if (test_bit(S_POWER_PMI, &il->status)) { + /* rx_ant has been set to all valid chains previously */ + active_chains = + rx_ant & ((u8) (il->chain_noise_data.active_chains)); + if (!active_chains) + active_chains = rx_ant; + + D_SCAN("chain_noise_data.active_chains: %u\n", + il->chain_noise_data.active_chains); + + rx_ant = il4965_first_antenna(active_chains); + } + + /* MIMO is not used here, but value is required */ + rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); + + cmd_len = + il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, + vif->addr, il->scan_request->ie, + il->scan_request->ie_len, + IL_MAX_SCAN_SIZE - sizeof(*scan)); + scan->tx_cmd.len = cpu_to_le16(cmd_len); + + scan->filter_flags |= + (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK); + + scan->channel_count = + il4965_get_channels_for_scan(il, vif, band, is_active, n_probes, + (void *)&scan->data[cmd_len]); + if (scan->channel_count == 0) { + D_SCAN("channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += + le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct il_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(S_SCAN_HW, &il->status); + + ret = il_send_cmd_sync(il, &cmd); + if (ret) + clear_bit(S_SCAN_HW, &il->status); + + return ret; +} + +int +il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, + bool add) +{ + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + + if (add) + return il4965_add_bssid_station(il, vif_priv->ctx, + vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + return il_remove_station(il, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +void +il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed) +{ + lockdep_assert_held(&il->sta_lock); + + if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed) + il->stations[sta_id].tid[tid].tfds_in_queue -= freed; + else { + D_TX("free more than tfds_in_queue (%u:%d)\n", + il->stations[sta_id].tid[tid].tfds_in_queue, freed); + il->stations[sta_id].tid[tid].tfds_in_queue = 0; + } +} + +#define IL_TX_QUEUE_MSK 0xfffff + +static bool +il4965_is_single_rx_stream(struct il_priv *il) +{ + return il->current_ht_config.smps == IEEE80211_SMPS_STATIC || + il->current_ht_config.single_chain_sufficient; +} + +#define IL_NUM_RX_CHAINS_MULTIPLE 3 +#define IL_NUM_RX_CHAINS_SINGLE 2 +#define IL_NUM_IDLE_CHAINS_DUAL 2 +#define IL_NUM_IDLE_CHAINS_SINGLE 1 + +/* + * Determine how many receiver/antenna chains to use. + * + * More provides better reception via diversity. Fewer saves power + * at the expense of throughput, but only when not in powersave to + * start with. + * + * MIMO (dual stream) requires at least 2, but works better with 3. 
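The chain-count helpers that follow clamp the requested active and idle counts to the number of chains physically present, which is just a population count over a small bitmap. A stand-alone equivalent (not from the patch), limited to the four possible chains:

static unsigned count_chain_bitmap(unsigned chain_bitmap)
{
	unsigned n = 0, i;

	for (i = 0; i < 4; i++)		/* up to 4 chains */
		n += (chain_bitmap >> i) & 1u;
	return n;
}

With antennas A, B and C connected the bitmap is 0x7 and the count is 3, so a request for three active chains stands; a larger request gets clamped.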
+ * This does not determine *which* chains to use, just how many. + */ +static int +il4965_get_active_rx_chain_count(struct il_priv *il) +{ + /* # of Rx chains to use when expecting MIMO. */ + if (il4965_is_single_rx_stream(il)) + return IL_NUM_RX_CHAINS_SINGLE; + else + return IL_NUM_RX_CHAINS_MULTIPLE; +} + +/* + * When we are in power saving mode, unless device support spatial + * multiplexing power save, use the active count for rx chain count. + */ +static int +il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt) +{ + /* # Rx chains when idling, depending on SMPS mode */ + switch (il->current_ht_config.smps) { + case IEEE80211_SMPS_STATIC: + case IEEE80211_SMPS_DYNAMIC: + return IL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_OFF: + return active_cnt; + default: + WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps); + return active_cnt; + } +} + +/* up to 4 chains */ +static u8 +il4965_count_chain_bitmap(u32 chain_bitmap) +{ + u8 res; + res = (chain_bitmap & BIT(0)) >> 0; + res += (chain_bitmap & BIT(1)) >> 1; + res += (chain_bitmap & BIT(2)) >> 2; + res += (chain_bitmap & BIT(3)) >> 3; + return res; +} + +/** + * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image + * + * Selects how many and which Rx receivers/antennas/chains to use. + * This should not be used for scan command ... it puts data in wrong place. + */ +void +il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx) +{ + bool is_single = il4965_is_single_rx_stream(il); + bool is_cam = !test_bit(S_POWER_PMI, &il->status); + u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; + u32 active_chains; + u16 rx_chain; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, il4965_chain_noise_calibration() + * checks which antennas actually *are* connected. */ + if (il->chain_noise_data.active_chains) + active_chains = il->chain_noise_data.active_chains; + else + active_chains = il->hw_params.valid_rx_ant; + + rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; + + /* How many receivers should we use? 
*/ + active_rx_cnt = il4965_get_active_rx_chain_count(il); + idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt); + + /* correct rx chain count according hw settings + * and chain noise calibration + */ + valid_rx_cnt = il4965_count_chain_bitmap(active_chains); + if (valid_rx_cnt < active_rx_cnt) + active_rx_cnt = valid_rx_cnt; + + if (valid_rx_cnt < idle_rx_cnt) + idle_rx_cnt = valid_rx_cnt; + + rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; + rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; + + ctx->staging.rx_chain = cpu_to_le16(rx_chain); + + if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam) + ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain, + active_rx_cnt, idle_rx_cnt); + + WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || + active_rx_cnt < idle_rx_cnt); +} + +static const char * +il4965_get_fh_string(int cmd) +{ + switch (cmd) { + IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG); + IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG); + IL_CMD(FH49_RSCSR_CHNL0_WPTR); + IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG); + IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG); + IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG); + IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IL_CMD(FH49_TSSR_TX_STATUS_REG); + IL_CMD(FH49_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +} + +int +il4965_dump_fh(struct il_priv *il, char **buf, bool display) +{ + int i; +#ifdef CONFIG_IWLEGACY_DEBUG + int pos = 0; + size_t bufsz = 0; +#endif + static const u32 fh_tbl[] = { + FH49_RSCSR_CHNL0_STTS_WPTR_REG, + FH49_RSCSR_CHNL0_RBDCB_BASE_REG, + FH49_RSCSR_CHNL0_WPTR, + FH49_MEM_RCSR_CHNL0_CONFIG_REG, + FH49_MEM_RSSR_SHARED_CTRL_REG, + FH49_MEM_RSSR_RX_STATUS_REG, + FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH49_TSSR_TX_STATUS_REG, + FH49_TSSR_TX_ERROR_REG + }; +#ifdef CONFIG_IWLEGACY_DEBUG + if (display) { + bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + pos += + scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + pos += + scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + il4965_get_fh_string(fh_tbl[i]), + il_rd(il, fh_tbl[i])); + } + return pos; + } +#endif + IL_ERR("FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]), + il_rd(il, fh_tbl[i])); + } + return 0; +} + +void +il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + il->missed_beacon_threshold) { + D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(S_SCANNING, &il->status)) + il4965_init_sensitivity(il); + } +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. 
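A stand-alone sketch (not from the patch) of the averaging done in il4965_rx_calc_noise() below: only chains that reported a non-zero in-band silence value take part, and the fixed 107 dB offset matches the constant used in the function. The inputs here are assumed to be already masked with IN_BAND_FILTER, as the real code does first.

static int silence_to_noise_dbm(const unsigned *silence, unsigned nchains)
{
	unsigned total = 0, active = 0, i;

	for (i = 0; i < nchains; i++) {
		if (silence[i]) {
			total += silence[i];
			active++;
		}
	}
	if (!active)
		return -127;		/* stand-in for "measurement not available" */
	return (int)(total / active) - 107;
}

For silences of 40, 46 and 0 the average over the two active chains is 43, giving a noise floor of -64 dBm.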
*/ +static void +il4965_rx_calc_noise(struct il_priv *il) +{ + struct stats_rx_non_phy *rx_info; + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a, bcn_silence_b, bcn_silence_c; + int last_rx_noise; + + rx_info = &(il->_4965.stats.rx.general); + bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + last_rx_noise = (total_silence / num_active_rx) - 107; + else + last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE; + + D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a, + bcn_silence_b, bcn_silence_c, last_rx_noise); +} + +#ifdef CONFIG_IWLEGACY_DEBUGFS +/* + * based on the assumption of all stats counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. + */ +static void +il4965_accumulative_stats(struct il_priv *il, __le32 * stats) +{ + int i, size; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + struct stats_general_common *general, *accum_general; + struct stats_tx *tx, *accum_tx; + + prev_stats = (__le32 *) &il->_4965.stats; + accum_stats = (u32 *) &il->_4965.accum_stats; + size = sizeof(struct il_notif_stats); + general = &il->_4965.stats.general.common; + accum_general = &il->_4965.accum_stats.general.common; + tx = &il->_4965.stats.tx; + accum_tx = &il->_4965.accum_stats.tx; + delta = (u32 *) &il->_4965.delta_stats; + max_delta = (u32 *) &il->_4965.max_delta; + + for (i = sizeof(__le32); i < size; + i += + sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, + accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = + (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative stats for "no-counter" type stats */ + accum_general->temperature = general->temperature; + accum_general->ttl_timestamp = general->ttl_timestamp; +} +#endif + +#define REG_RECALIB_PERIOD (60) + +void +il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb) +{ + int change; + struct il_rx_pkt *pkt = rxb_addr(rxb); + + D_RX("Statistics notification received (%d vs %d).\n", + (int)sizeof(struct il_notif_stats), + le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK); + + change = + ((il->_4965.stats.general.common.temperature != + pkt->u.stats.general.common.temperature) || + ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK))); +#ifdef CONFIG_IWLEGACY_DEBUGFS + il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats); +#endif + + /* TODO: reading some of stats is unneeded */ + memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats)); + + set_bit(S_STATS, &il->status); + + /* Reschedule the stats timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&il->stats_periodic, + jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(S_SCANNING, &il->status)) && + (pkt->hdr.cmd == N_STATS)) { + 
il4965_rx_calc_noise(il); + queue_work(il->workqueue, &il->run_time_calib_work); + } + if (il->cfg->ops->lib->temp_ops.temperature && change) + il->cfg->ops->lib->temp_ops.temperature(il); +} + +void +il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + + if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) { +#ifdef CONFIG_IWLEGACY_DEBUGFS + memset(&il->_4965.accum_stats, 0, + sizeof(struct il_notif_stats)); + memset(&il->_4965.delta_stats, 0, + sizeof(struct il_notif_stats)); + memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats)); +#endif + D_RX("Statistics have been cleared\n"); + } + il4965_hdl_stats(il, rxb); +} + + +/* + * mac80211 queues, ACs, hardware queues, FIFOs. + * + * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues + * + * Mac80211 uses the following numbers, which we get as from it + * by way of skb_get_queue_mapping(skb): + * + * VO 0 + * VI 1 + * BE 2 + * BK 3 + * + * + * Regular (not A-MPDU) frames are put into hardware queues corresponding + * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their + * own queue per aggregation session (RA/TID combination), such queues are + * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In + * order to map frames to the right queue, we also need an AC->hw queue + * mapping. This is implemented here. + * + * Due to the way hw queues are set up (by the hw specific modules like + * 4965.c), the AC->hw queue mapping is the identity + * mapping. + */ + +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO +}; + +static inline int +il4965_get_ac_from_tid(u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return tid_to_ac[tid]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +static inline int +il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return ctx->ac_to_fifo[tid_to_ac[tid]]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +/* + * handle build C_TX command notification. 
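As a quick worked example of the tid_to_ac[] mapping above (not from the patch), using mac80211's numbering quoted in the comment (VO 0, VI 1, BE 2, BK 3): TIDs 6 and 7 land on the voice AC, TIDs 0 and 3 on best effort, and anything above 7 is rejected just as il4965_get_ac_from_tid() rejects it.

static const int tid_to_ac_sketch[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };	/* BE BK BK BE VI VI VO VO */

static int ac_from_tid(unsigned tid)
{
	if (tid < 8)
		return tid_to_ac_sketch[tid];
	return -1;	/* TIDs 8-15 not supported, mirrors the -EINVAL above */
}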
+ */ +static void +il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb, + struct il_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, u8 std_id) +{ + __le16 fc = hdr->frame_control; + __le32 tx_flags = tx_cmd->tx_flags; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + il_tx_cmd_protection(il, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +static void +il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, __le16 fc) +{ + const u8 rts_retry_limit = 60; + u32 rate_flags; + int rate_idx; + u8 data_retry_limit; + u8 rate_plcp; + + /* Set retry limit on DATA packets and Probe Responses */ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IL4965_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + /* Set retry limit on RTS packets */ + tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit); + + /* DATA packets will use the uCode station table for rate/antenna + * selection */ + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_idx = 0; + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + return; + } + + /** + * If the current TX rate stored in mac80211 has the MCS bit set, it's + * not really a TX rate. Thus, we use the lowest supported rate for + * this band. Also use the lowest supported rate if the stored rate + * idx is invalid. 
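A stand-alone restatement (not from the patch) of the fallback the comment above describes and the code below implements. The driver constants (RATE_COUNT_LEGACY, the band's lowest rate, IL_FIRST_OFDM_RATE) are passed as parameters, since their values live in the rate tables:

static int sanitize_rate_idx(int rate_idx, int is_mcs, int lowest_rate,
			     int rate_count_legacy, int is_5ghz, int first_ofdm_rate)
{
	if (is_mcs || rate_idx < 0 || rate_idx > rate_count_legacy)
		rate_idx = lowest_rate;		/* MCS or out-of-range: fall back */
	if (is_5ghz)
		rate_idx += first_ofdm_rate;	/* 5 GHz band has no CCK rates */
	return rate_idx;
}

So a 5 GHz management frame whose rate-control entry carries the MCS bit ends up on the band's lowest legacy OFDM rate, remapped into the driver's combined CCK+OFDM table.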
+ */ + rate_idx = info->control.rates[0].idx; + if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 + || rate_idx > RATE_COUNT_LEGACY) + rate_idx = + rate_lowest_index(&il->bands[info->band], + info->control.sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IL_FIRST_OFDM_RATE; + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = il_rates[rate_idx].plcp; + /* Zero out flags for this packet */ + rate_flags = 0; + + /* Set CCK flag as needed */ + if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set up antennas */ + il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); + rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags); +} + +static void +il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, + struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag, + int sta_id) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; + D_TX("tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); + D_TX("tx_cmd with tkip hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= + (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) << + TX_CMD_SEC_SHIFT); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + + D_TX("Configuring packet for WEP encryption " "with key %d\n", + keyconf->keyidx); + break; + + default: + IL_ERR("Unknown encode cipher %x\n", keyconf->cipher); + break; + } +} + +/* + * start C_TX command process + */ +int +il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = info->control.sta; + struct il_station_priv *sta_priv = NULL; + struct il_tx_queue *txq; + struct il_queue *q; + struct il_device_cmd *out_cmd; + struct il_cmd_meta *out_meta; + struct il_tx_cmd *tx_cmd; + struct il_rxon_context *ctx = &il->ctx; + int txq_id; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + dma_addr_t scratch_phys; + u16 len, firstlen, secondlen; + u16 seq_number = 0; + __le16 fc; + u8 hdr_len; + u8 sta_id; + u8 wait_write_ptr = 0; + u8 tid = 0; + u8 *qc = NULL; + unsigned long flags; + bool is_agg = false; + + if (info->control.vif) + ctx = il_rxon_ctx_from_vif(info->control.vif); + + spin_lock_irqsave(&il->lock, flags); + if (il_is_rfkill(il)) { + D_DROP("Dropping - RF KILL\n"); + goto drop_unlock; + } + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLEGACY_DEBUG + if (ieee80211_is_auth(fc)) + D_TX("Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + D_TX("Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + D_TX("Sending REASSOC frame\n"); +#endif + + hdr_len = ieee80211_hdrlen(fc); + + /* For management frames use broadcast id to do not break aggregation */ + if (!ieee80211_is_data(fc)) + sta_id = ctx->bcast_sta_id; + else { + /* Find idx 
into station table for destination station */ + sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta); + + if (sta_id == IL_INVALID_STATION) { + D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); + goto drop_unlock; + } + } + + D_TX("station Id %d\n", sta_id); + + if (sta) + sta_priv = (void *)sta->drv_priv; + + if (sta_priv && sta_priv->asleep && + (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) { + /* + * This sends an asynchronous command to the device, + * but we can rely on it being processed before the + * next frame is processed -- and the next frame to + * this station is the one that will consume this + * counter. + * For now set the counter to just 1 since we do not + * support uAPSD yet. + */ + il4965_sta_modify_sleep_tx_count(il, sta_id, 1); + } + + /* + * Send this frame after DTIM -- there's a special queue + * reserved for this for contexts that support AP mode. + */ + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + txq_id = ctx->mcast_queue; + /* + * The microcode will clear the more data + * bit in the last frame it transmits. + */ + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else + txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; + + /* irqs already disabled/saved above when locking il->lock */ + spin_lock(&il->sta_lock); + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { + spin_unlock(&il->sta_lock); + goto drop_unlock; + } + seq_number = il->stations[sta_id].tid[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = + hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + /* aggregation is on for this <sta,tid> */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) { + txq_id = il->stations[sta_id].tid[tid].agg.txq_id; + is_agg = true; + } + } + + txq = &il->txq[txq_id]; + q = &txq->q; + + if (unlikely(il_queue_space(q) < q->high_mark)) { + spin_unlock(&il->sta_lock); + goto drop_unlock; + } + + if (ieee80211_is_data_qos(fc)) { + il->stations[sta_id].tid[tid].tfds_in_queue++; + if (!ieee80211_has_morefrags(fc)) + il->stations[sta_id].tid[tid].seq_number = seq_number; + } + + spin_unlock(&il->sta_lock); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = ctx; + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[q->write_ptr]; + out_meta = &txq->meta[q->write_ptr]; + tx_cmd = &out_cmd->cmd.tx; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(struct il_tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD idx within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. 
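One detail worth spelling out from the QoS handling a few lines up (not from the patch): the 802.11 sequence-control field keeps the 4-bit fragment number in its low bits and the 12-bit sequence number above it, which is why the per-TID counter advances in steps of 0x10. The two masks below correspond to IEEE80211_SCTL_FRAG and IEEE80211_SCTL_SEQ.

#include <stdint.h>

#define SCTL_FRAG	0x000f		/* fragment number: bits 0-3 */
#define SCTL_SEQ	0xfff0		/* sequence number: bits 4-15 */

static uint16_t stamp_seq_ctrl(uint16_t seq_ctrl, uint16_t *driver_seq)
{
	seq_ctrl = (seq_ctrl & SCTL_FRAG) | (*driver_seq & SCTL_SEQ);
	*driver_seq += 0x10;		/* next frame gets sequence number + 1 */
	return seq_ctrl;
}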
+ */ + out_cmd->hdr.cmd = C_TX; + out_cmd->hdr.sequence = + cpu_to_le16((u16) + (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + /* Total # bytes to be transmitted */ + len = (u16) skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id); + il_dbg_log_tx_data_frame(il, len, hdr); + + il4965_tx_cmd_build_rate(il, tx_cmd, info, fc); + + il_update_stats(il, true, fc, len); + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len; + firstlen = (len + 3) & ~3; + + /* Tell NIC about any 2-byte padding after MAC header */ + if (firstlen != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = + pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, + PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, firstlen); + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, + 1, 0); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). 
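The firstlen computation above is plain round-up-to-dword arithmetic; the MH_PAD flag only matters when the rounding actually added bytes. A stand-alone sketch (not from the patch):

#include <stdint.h>

static uint32_t round_up_dword(uint32_t len)
{
	return (len + 3) & ~3u;
}

static int needs_mh_pad(uint32_t len)
{
	return round_up_dword(len) != len;	/* mirrors the firstlen != len test */
}

For example, a length of 54 rounds to 56, so two pad bytes are added and TX_CMD_FLG_MH_PAD_MSK is set; a length of 56 passes through unchanged.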
*/ + secondlen = skb->len - hdr_len; + if (secondlen > 0) { + phys_addr = + pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, + PCI_DMA_TODEVICE); + il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, + secondlen, 0, 0); + } + + scratch_phys = + txcmd_phys + sizeof(struct il_cmd_header) + + offsetof(struct il_tx_cmd, scratch); + + /* take back ownership of DMA buffer to enable update */ + pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen, + PCI_DMA_BIDIRECTIONAL); + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); + + D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); + D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd)); + il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len); + + /* Set up entry for this TFD in Tx byte-count array */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) + il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, + le16_to_cpu(tx_cmd-> + len)); + + pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen, + PCI_DMA_BIDIRECTIONAL); + + /* Tell device the write idx *just past* this latest filled TFD */ + q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); + il_txq_update_write_ptr(il, txq); + spin_unlock_irqrestore(&il->lock, flags); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually, + * regardless of the value of ret. "ret" only indicates + * whether or not we should update the write pointer. + */ + + /* + * Avoid atomic ops if it isn't an associated client. + * Also, if this is a packet for aggregation, don't + * increase the counter because the ucode will stop + * aggregation queues when their respective station + * goes to sleep. 
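+ * (The pending_frames counter incremented below is decremented again in
+ * il4965_non_agg_tx_status() once the Tx status comes back; that is also
+ * where ieee80211_sta_block_awake() un-blocks the station.)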
+ */ + if (sta_priv && sta_priv->client && !is_agg) + atomic_inc(&sta_priv->pending_frames); + + if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&il->lock, flags); + txq->need_update = 1; + il_txq_update_write_ptr(il, txq); + spin_unlock_irqrestore(&il->lock, flags); + } else { + il_stop_queue(il, txq); + } + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&il->lock, flags); + return -1; +} + +static inline int +il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size) +{ + ptr->addr = + dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static inline void +il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +/** + * il4965_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void +il4965_hw_txq_ctx_free(struct il_priv *il) +{ + int txq_id; + + /* Tx queues */ + if (il->txq) { + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) + if (txq_id == il->cmd_queue) + il_cmd_queue_free(il); + else + il_tx_queue_free(il, txq_id); + } + il4965_free_dma_ptr(il, &il->kw); + + il4965_free_dma_ptr(il, &il->scd_bc_tbls); + + /* free tx queue structure */ + il_txq_mem(il); +} + +/** + * il4965_txq_ctx_alloc - allocate TX queue context + * Allocate all Tx DMA structures and initialize them + * + * @param il + * @return error code + */ +int +il4965_txq_ctx_alloc(struct il_priv *il) +{ + int ret; + int txq_id, slots_num; + unsigned long flags; + + /* Free all tx/cmd queues and keep-warm buffer */ + il4965_hw_txq_ctx_free(il); + + ret = + il4965_alloc_dma_ptr(il, &il->scd_bc_tbls, + il->hw_params.scd_bc_tbls_size); + if (ret) { + IL_ERR("Scheduler BC Table allocation failed\n"); + goto error_bc_tbls; + } + /* Alloc keep-warm buffer */ + ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE); + if (ret) { + IL_ERR("Keep Warm allocation failed\n"); + goto error_kw; + } + + /* allocate tx queue structure */ + ret = il_alloc_txq_mem(il); + if (ret) + goto error; + + spin_lock_irqsave(&il->lock, flags); + + /* Turn off all Tx DMA fifos */ + il4965_txq_set_sched(il, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); + + spin_unlock_irqrestore(&il->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { + slots_num = + (txq_id == + il->cmd_queue) ? 
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id); + if (ret) { + IL_ERR("Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return ret; + +error: + il4965_hw_txq_ctx_free(il); + il4965_free_dma_ptr(il, &il->kw); +error_kw: + il4965_free_dma_ptr(il, &il->scd_bc_tbls); +error_bc_tbls: + return ret; +} + +void +il4965_txq_ctx_reset(struct il_priv *il) +{ + int txq_id, slots_num; + unsigned long flags; + + spin_lock_irqsave(&il->lock, flags); + + /* Turn off all Tx DMA fifos */ + il4965_txq_set_sched(il, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); + + spin_unlock_irqrestore(&il->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4) */ + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { + slots_num = + txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id); + } +} + +/** + * il4965_txq_ctx_stop - Stop all Tx DMA channels + */ +void +il4965_txq_ctx_stop(struct il_priv *il) +{ + int ch, txq_id; + unsigned long flags; + + /* Turn off all Tx DMA fifos */ + spin_lock_irqsave(&il->lock, flags); + + il4965_txq_set_sched(il, 0); + + /* Stop each Tx DMA channel, and wait for it to be idle */ + for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) { + il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + if (il_poll_bit + (il, FH49_TSSR_TX_STATUS_REG, + FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000)) + IL_ERR("Failing on timeout while stopping" + " DMA channel %d [0x%08x]", ch, + il_rd(il, FH49_TSSR_TX_STATUS_REG)); + } + spin_unlock_irqrestore(&il->lock, flags); + + if (!il->txq) + return; + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) + if (txq_id == il->cmd_queue) + il_cmd_queue_unmap(il); + else + il_tx_queue_unmap(il, txq_id); +} + +/* + * Find first available (lowest unused) Tx Queue, mark it "active". + * Called only when finding queue for aggregation. + * Should never return anything < 7, because they should already + * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) + */ +static int +il4965_txq_ctx_activate_free(struct il_priv *il) +{ + int txq_id; + + for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +/** + * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration + */ +static void +il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
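+ * (Writing ACTIVE=0 together with SCD_ACT_EN=1, as done below, therefore
+ * clears only the ACTIVE bit and leaves the rest of the queue's scheduler
+ * state untouched.)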
*/ + il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id), + (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +/** + * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue + */ +static int +il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = + il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = il_read_targ_mem(il, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + il_write_targ_mem(il, tbl_dw_addr, tbl_dw); + + return 0; +} + +/** + * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue + * + * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE, + * i.e. it must be one of the higher queues used for aggregation + */ +static int +il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id, + int tid, u16 ssn_idx) +{ + unsigned long flags; + u16 ra_tid; + int ret; + + if ((IL49_FIRST_AMPDU_QUEUE > txq_id) || + (IL49_FIRST_AMPDU_QUEUE + + il->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IL_WARN("queue number out of range: %d, must be %d to %d\n", + txq_id, IL49_FIRST_AMPDU_QUEUE, + IL49_FIRST_AMPDU_QUEUE + + il->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + ra_tid = BUILD_RAxTID(sta_id, tid); + + /* Modify device's station table to Tx this TID */ + ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid); + if (ret) + return ret; + + spin_lock_irqsave(&il->lock, flags); + + /* Stop this Tx queue before configuring it */ + il4965_tx_queue_stop_scheduler(il, txq_id); + + /* Map receiver-address / traffic-ID to this queue */ + il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id); + + /* Set this queue as a chain-building queue */ + il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + /* Place first TFD at idx corresponding to start sequence number. 
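+ * (The & 0xff below maps the 12-bit SSN onto the TFD ring, which is
+ * assumed to be 256 entries deep.)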
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + il4965_set_wr_ptrs(il, txq_id, ssn_idx); + + /* Set up Tx win size and frame limit for this queue */ + il_write_targ_mem(il, + il->scd_base_addr + + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) + & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + il_write_targ_mem(il, + il->scd_base_addr + + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << + IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&il->lock, flags); + + return 0; +} + +int +il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 * ssn) +{ + int sta_id; + int tx_fifo; + int txq_id; + int ret; + unsigned long flags; + struct il_tid_data *tid_data; + + tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo < 0)) + return tx_fifo; + + D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid); + + sta_id = il_sta_id(sta); + if (sta_id == IL_INVALID_STATION) { + IL_ERR("Start AGG on invalid station\n"); + return -ENXIO; + } + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) { + IL_ERR("Start AGG when state is not IL_AGG_OFF !\n"); + return -ENXIO; + } + + txq_id = il4965_txq_ctx_activate_free(il); + if (txq_id == -1) { + IL_ERR("No free aggregation queue available\n"); + return -ENXIO; + } + + spin_lock_irqsave(&il->sta_lock, flags); + tid_data = &il->stations[sta_id].tid[tid]; + *ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id); + spin_unlock_irqrestore(&il->sta_lock, flags); + + ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn); + if (ret) + return ret; + + spin_lock_irqsave(&il->sta_lock, flags); + tid_data = &il->stations[sta_id].tid[tid]; + if (tid_data->tfds_in_queue == 0) { + D_HT("HW queue is empty\n"); + tid_data->agg.state = IL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + D_HT("HW queue is NOT empty: %d packets in HW queue\n", + tid_data->tfds_in_queue); + tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA; + } + spin_unlock_irqrestore(&il->sta_lock, flags); + return ret; +} + +/** + * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE + * il->lock must be held by the caller + */ +static int +il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo) +{ + if ((IL49_FIRST_AMPDU_QUEUE > txq_id) || + (IL49_FIRST_AMPDU_QUEUE + + il->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IL_WARN("queue number out of range: %d, must be %d to %d\n", + txq_id, IL49_FIRST_AMPDU_QUEUE, + IL49_FIRST_AMPDU_QUEUE + + il->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + il4965_tx_queue_stop_scheduler(il, txq_id); + + il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + il4965_set_wr_ptrs(il, txq_id, ssn_idx); + + il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, 
(1 << txq_id)); + il_txq_ctx_deactivate(il, txq_id); + il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0); + + return 0; +} + +int +il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + int tx_fifo_id, txq_id, sta_id, ssn; + struct il_tid_data *tid_data; + int write_ptr, read_ptr; + unsigned long flags; + + tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo_id < 0)) + return tx_fifo_id; + + sta_id = il_sta_id(sta); + + if (sta_id == IL_INVALID_STATION) { + IL_ERR("Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&il->sta_lock, flags); + + tid_data = &il->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + + switch (il->stations[sta_id].tid[tid].agg.state) { + case IL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. + */ + D_HT("AGG stop before setup done\n"); + goto turn_off; + case IL_AGG_ON: + break; + default: + IL_WARN("Stopping AGG while state not ON or starting\n"); + } + + write_ptr = il->txq[txq_id].q.write_ptr; + read_ptr = il->txq[txq_id].q.read_ptr; + + /* The queue is not empty */ + if (write_ptr != read_ptr) { + D_HT("Stopping a non empty AGG HW QUEUE\n"); + il->stations[sta_id].tid[tid].agg.state = + IL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&il->sta_lock, flags); + return 0; + } + + D_HT("HW queue is empty\n"); +turn_off: + il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF; + + /* do not restore/save irqs */ + spin_unlock(&il->sta_lock); + spin_lock(&il->lock); + + /* + * the only reason this call can fail is queue number out of range, + * which can happen if uCode is reloaded and all the station + * information are lost. if it is outside the range, there is no need + * to deactivate the uCode queue, just return "success" to allow + * mac80211 to clean up it own data. 
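+ * (Accordingly, the return value of il4965_txq_agg_disable() is ignored
+ * below and ieee80211_stop_tx_ba_cb_irqsafe() is fired either way.)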
+ */ + il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id); + spin_unlock_irqrestore(&il->lock, flags); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + return 0; +} + +int +il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id) +{ + struct il_queue *q = &il->txq[txq_id].q; + u8 *addr = il->stations[sta_id].sta.sta.addr; + struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid]; + struct il_rxon_context *ctx; + + ctx = &il->ctx; + + lockdep_assert_held(&il->sta_lock); + + switch (il->stations[sta_id].tid[tid].agg.state) { + case IL_EMPTYING_HW_QUEUE_DELBA: + /* We are reclaiming the last packet of the */ + /* aggregated HW queue */ + if (txq_id == tid_data->agg.txq_id && + q->read_ptr == q->write_ptr) { + u16 ssn = SEQ_TO_SN(tid_data->seq_number); + int tx_fifo = il4965_get_fifo_from_tid(ctx, tid); + D_HT("HW queue empty: continue DELBA flow\n"); + il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo); + tid_data->agg.state = IL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + case IL_EMPTYING_HW_QUEUE_ADDBA: + /* We are reclaiming the last packet of the queue */ + if (tid_data->tfds_in_queue == 0) { + D_HT("HW queue empty: continue ADDBA flow\n"); + tid_data->agg.state = IL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + } + + return 0; +} + +static void +il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr1) +{ + struct ieee80211_sta *sta; + struct il_station_priv *sta_priv; + + rcu_read_lock(); + sta = ieee80211_find_sta(ctx->vif, addr1); + if (sta) { + sta_priv = (void *)sta->drv_priv; + /* avoid atomic ops if this isn't a client */ + if (sta_priv->client && + atomic_dec_return(&sta_priv->pending_frames) == 0) + ieee80211_sta_block_awake(il->hw, sta, false); + } + rcu_read_unlock(); +} + +static void +il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; + + if (!is_agg) + il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1); + + ieee80211_tx_status_irqsafe(il->hw, tx_info->skb); +} + +int +il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx) +{ + struct il_tx_queue *txq = &il->txq[txq_id]; + struct il_queue *q = &txq->q; + struct il_tx_info *tx_info; + int nfreed = 0; + struct ieee80211_hdr *hdr; + + if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { + IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " + "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, + q->write_ptr, q->read_ptr); + return 0; + } + + for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + + if (WARN_ON_ONCE(tx_info->skb == NULL)) + continue; + + hdr = (struct ieee80211_hdr *)tx_info->skb->data; + if (ieee80211_is_data_qos(hdr->frame_control)) + nfreed++; + + il4965_tx_status(il, tx_info, + txq_id >= IL4965_FIRST_AMPDU_QUEUE); + tx_info->skb = NULL; + + il->cfg->ops->lib->txq_free_tfd(il, txq); + } + return nfreed; +} + +/** + * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack + * + * Go through block-ack's bitmap of ACK'd frames, update driver's record of + * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 
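+ * (Sketch of the matching done here: the BA bitmap is shifted so that
+ * bit 0 lines up with agg->start_idx, then ANDed with the bitmap of
+ * frames actually sent; each surviving 1 bit is counted as an ACK.)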
+ */ +static int +il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg, + struct il_compressed_ba_resp *ba_resp) +{ + int i, sh, ack; + u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + int successes = 0; + struct ieee80211_tx_info *info; + u64 bitmap, sent_bitmap; + + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IL_ERR("Received BA when not expected\n"); + return -EINVAL; + } + + /* Mark that the expected block-ack response arrived */ + agg->wait_for_ba = 0; + D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); + + /* Calculate shift to align block-ack bits with our Tx win bits */ + sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4); + if (sh < 0) /* tbw something is wrong with indices */ + sh += 0x100; + + if (agg->frame_count > (64 - sh)) { + D_TX_REPLY("more frames than bitmap size"); + return -1; + } + + /* don't use 64-bit values for now */ + bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; + + /* check for success or failure according to the + * transmitted bitmap and block-ack bitmap */ + sent_bitmap = bitmap & agg->bitmap; + + /* For each frame attempted in aggregation, + * update driver's record of tx frame's status. */ + i = 0; + while (sent_bitmap) { + ack = sent_bitmap & 1ULL; + successes += ack; + D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK", + i, (agg->start_idx + i) & 0xff, agg->start_idx + i); + sent_bitmap >>= 1; + ++i; + } + + D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); + + info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_ACK; + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = successes; + info->status.ampdu_len = agg->frame_count; + il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info); + + return 0; +} + +/** + * translate ucode response to mac80211 tx status control values + */ +void +il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->control.rates[0]; + + info->antenna_sel_tx = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_MCS_HT_MSK) + r->flags |= IEEE80211_TX_RC_MCS; + if (rate_n_flags & RATE_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + if (rate_n_flags & RATE_MCS_HT40_MSK) + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (rate_n_flags & RATE_MCS_DUP_MSK) + r->flags |= IEEE80211_TX_RC_DUP_DATA; + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); +} + +/** + * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA + * + * Handles block-acknowledge notification from device, which reports success + * of frames sent via aggregation. 
+ */ +void +il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + struct il_tx_queue *txq = NULL; + struct il_ht_agg *agg; + int idx; + int sta_id; + int tid; + unsigned long flags; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + + /* "ssn" is start of block-ack Tx win, corresponds to idx + * (in Tx queue's circular buffer) of first TFD/frame in win */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (scd_flow >= il->hw_params.max_txq_num) { + IL_ERR("BUG_ON scd_flow is bigger than number of queues\n"); + return; + } + + txq = &il->txq[scd_flow]; + sta_id = ba_resp->sta_id; + tid = ba_resp->tid; + agg = &il->stations[sta_id].tid[tid].agg; + if (unlikely(agg->txq_id != scd_flow)) { + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now! + * since it is possible happen very often and in order + * not to fill the syslog, don't enable the logging by default + */ + D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n", + scd_flow, agg->txq_id); + return; + } + + /* Find idx just before block-ack win */ + idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + spin_lock_irqsave(&il->sta_lock, flags); + + D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n", + agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32, + ba_resp->sta_id); + D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = " + "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl, + (unsigned long long)le64_to_cpu(ba_resp->bitmap), + ba_resp->scd_flow, ba_resp->scd_ssn); + D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx, + (unsigned long long)agg->bitmap); + + /* Update driver's record of ACK vs. not for each frame in win */ + il4965_tx_status_reply_compressed_ba(il, agg, ba_resp); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack win (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). 
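+ * (idx computed above points just before the start of the win, so
+ * il4965_tx_queue_reclaim() below frees everything from read_ptr up to
+ * and including that entry.)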
*/ + if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { + /* calculate mac80211 ampdu sw queue to wake */ + int freed = il4965_tx_queue_reclaim(il, scd_flow, idx); + il4965_free_tfds_in_queue(il, sta_id, tid, freed); + + if (il_queue_space(&txq->q) > txq->q.low_mark && + il->mac80211_registered && + agg->state != IL_EMPTYING_HW_QUEUE_DELBA) + il_wake_queue(il, txq); + + il4965_txq_check_empty(il, sta_id, tid, scd_flow); + } + + spin_unlock_irqrestore(&il->sta_lock, flags); +} + +#ifdef CONFIG_IWLEGACY_DEBUG +const char * +il4965_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(FIFO_UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); + TX_STATUS_FAIL(PASSIVE_NO_RX); + TX_STATUS_FAIL(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLEGACY_DEBUG */ + +static struct il_link_quality_cmd * +il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id) +{ + int i, r; + struct il_link_quality_cmd *link_cmd; + u32 rate_flags = 0; + __le32 rate_n_flags; + + link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL); + if (!link_cmd) { + IL_ERR("Unable to allocate memory for LQ cmd.\n"); + return NULL; + } + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (il->band == IEEE80211_BAND_5GHZ) + r = RATE_6M_IDX; + else + r = RATE_1M_IDX; + + if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= + il4965_first_antenna(il->hw_params. + valid_tx_ant) << RATE_MCS_ANT_POS; + rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + link_cmd->rs_table[i].rate_n_flags = rate_n_flags; + + link_cmd->general_params.single_stream_ant_msk = + il4965_first_antenna(il->hw_params.valid_tx_ant); + + link_cmd->general_params.dual_stream_ant_msk = + il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params. + valid_tx_ant); + if (!link_cmd->general_params.dual_stream_ant_msk) { + link_cmd->general_params.dual_stream_ant_msk = ANT_AB; + } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) { + link_cmd->general_params.dual_stream_ant_msk = + il->hw_params.valid_tx_ant; + } + + link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + link_cmd->sta_id = sta_id; + + return link_cmd; +} + +/* + * il4965_add_bssid_station - Add the special IBSS BSSID station + * + * Function sleeps. 
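+ * (It sleeps because the LQ command below is sent with CMD_SYNC, and
+ * il_add_station_common() is likewise expected to wait for the uCode's
+ * reply.)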
+ */ +int +il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r) +{ + int ret; + u8 sta_id; + struct il_link_quality_cmd *link_cmd; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IL_INVALID_STATION; + + ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IL_ERR("Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].used |= IL_STA_LOCAL; + spin_unlock_irqrestore(&il->sta_lock, flags); + + /* Set up default rate scaling table in device's station table */ + link_cmd = il4965_sta_alloc_lq(il, sta_id); + if (!link_cmd) { + IL_ERR("Unable to initialize rate scaling for station %pM.\n", + addr); + return -ENOMEM; + } + + ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true); + if (ret) + IL_ERR("Link quality command failed (%d)\n", ret); + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&il->sta_lock, flags); + + return 0; +} + +static int +il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx, + bool send_if_empty) +{ + int i, not_empty = 0; + u8 buff[sizeof(struct il_wep_cmd) + + sizeof(struct il_wep_key) * WEP_KEYS_MAX]; + struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff; + size_t cmd_size = sizeof(struct il_wep_cmd); + struct il_host_cmd cmd = { + .id = ctx->wep_key_cmd, + .data = wep_cmd, + .flags = CMD_SYNC, + }; + + might_sleep(); + + memset(wep_cmd, 0, + cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX)); + + for (i = 0; i < WEP_KEYS_MAX; i++) { + wep_cmd->key[i].key_idx = i; + if (ctx->wep_keys[i].key_size) { + wep_cmd->key[i].key_offset = i; + not_empty = 1; + } else { + wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; + } + + wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; + memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, + ctx->wep_keys[i].key_size); + } + + wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; + wep_cmd->num_keys = WEP_KEYS_MAX; + + cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX; + + cmd.len = cmd_size; + + if (not_empty || send_if_empty) + return il_send_cmd(il, &cmd); + else + return 0; +} + +int +il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx) +{ + lockdep_assert_held(&il->mutex); + + return il4965_static_wepkey_cmd(il, ctx, false); +} + +int +il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&il->mutex); + + D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx); + + memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); + if (il_is_rfkill(il)) { + D_WEP("Not sending C_WEPKEY command due to RFKILL.\n"); + /* but keys in device are clear anyway so return success */ + return 0; + } + ret = il4965_static_wepkey_cmd(il, ctx, 1); + D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret); + + return ret; +} + +int +il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&il->mutex); + + if (keyconf->keylen != WEP_KEY_LEN_128 && + keyconf->keylen != WEP_KEY_LEN_64) { + D_WEP("Bad WEP key length %d\n", keyconf->keylen); + return -EINVAL; + } + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = HW_KEY_DEFAULT; + il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; + + 
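+ /* Cache the key in the context; il4965_static_wepkey_cmd() below pushes
+ * all WEP_KEYS_MAX default key slots to the uCode in a single C_WEPKEY
+ * command. */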
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; + memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, + keyconf->keylen); + + ret = il4965_static_wepkey_cmd(il, ctx, false); + D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen, + keyconf->keyidx, ret); + + return ret; +} + +static int +il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct il_addsta_cmd sta_cmd; + + lockdep_assert_held(&il->mutex); + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + + key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (keyconf->keylen == WEP_KEY_LEN_128) + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + spin_lock_irqsave(&il->sta_lock, flags); + + il->stations[sta_id].keyinfo.cipher = keyconf->cipher; + il->stations[sta_id].keyinfo.keylen = keyconf->keylen; + il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; + + memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); + + memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key, + keyconf->keylen); + + if ((il->stations[sta_id].sta.key. + key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) + il->stations[sta_id].sta.key.key_offset = + il_get_free_ucode_key_idx(il); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + il->stations[sta_id].sta.key.key_flags = key_flags; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +static int +il4965_set_ccmp_dynamic_key_info(struct il_priv *il, + struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct il_addsta_cmd sta_cmd; + + lockdep_assert_held(&il->mutex); + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].keyinfo.cipher = keyconf->cipher; + il->stations[sta_id].keyinfo.keylen = keyconf->keylen; + + memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); + + memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); + + if ((il->stations[sta_id].sta.key. + key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) + il->stations[sta_id].sta.key.key_offset = + il_get_free_ucode_key_idx(il); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
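+ * (key_offset names a slot in the uCode key table; free slots are handed
+ * out by il_get_free_ucode_key_idx() and tracked in il->ucode_key_table.)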
*/ + + WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + il->stations[sta_id].sta.key.key_flags = key_flags; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +static int +il4965_set_tkip_dynamic_key_info(struct il_priv *il, + struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + unsigned long flags; + int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + + spin_lock_irqsave(&il->sta_lock, flags); + + il->stations[sta_id].keyinfo.cipher = keyconf->cipher; + il->stations[sta_id].keyinfo.keylen = 16; + + if ((il->stations[sta_id].sta.key. + key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) + il->stations[sta_id].sta.key.key_offset = + il_get_free_ucode_key_idx(il); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + il->stations[sta_id].sta.key.key_flags = key_flags; + + /* This copy is acutally not needed: we get the key with each TX */ + memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16); + + memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16); + + spin_unlock_irqrestore(&il->sta_lock, flags); + + return ret; +} + +void +il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 * phase1key) +{ + u8 sta_id; + unsigned long flags; + int i; + + if (il_scan_cancel(il)) { + /* cancel scan failed, just live w/ bad key and rely + briefly on SW decryption */ + return; + } + + sta_id = il_sta_id_or_broadcast(il, ctx, sta); + if (sta_id == IL_INVALID_STATION) + return; + + spin_lock_irqsave(&il->sta_lock, flags); + + il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; + + for (i = 0; i < 5; i++) + il->stations[sta_id].sta.key.tkip_rx_ttak[i] = + cpu_to_le16(phase1key[i]); + + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&il->sta_lock, flags); + +} + +int +il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + unsigned long flags; + u16 key_flags; + u8 keyidx; + struct il_addsta_cmd sta_cmd; + + lockdep_assert_held(&il->mutex); + + ctx->key_mapping_keys--; + + spin_lock_irqsave(&il->sta_lock, flags); + key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags); + keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; + + D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); + + if (keyconf->keyidx != keyidx) { + /* We need to remove a key with idx different that the one + * in the uCode. This means that the key we need to remove has + * been replaced by another one with different idx. 
+ * Don't do anything and return ok + */ + spin_unlock_irqrestore(&il->sta_lock, flags); + return 0; + } + + if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { + IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx, + key_flags); + spin_unlock_irqrestore(&il->sta_lock, flags); + return 0; + } + + if (!test_and_clear_bit + (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table)) + IL_ERR("idx %d not used in uCode key table.\n", + il->stations[sta_id].sta.key.key_offset); + memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); + memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); + il->stations[sta_id].sta.key.key_flags = + STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; + il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + if (il_is_rfkill(il)) { + D_WEP + ("Not sending C_ADD_STA command because RFKILL enabled.\n"); + spin_unlock_irqrestore(&il->sta_lock, flags); + return 0; + } + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +int +il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret; + + lockdep_assert_held(&il->mutex); + + ctx->key_mapping_keys++; + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = + il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = + il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id); + break; + default: + IL_ERR("Unknown alg: %s cipher = %x\n", __func__, + keyconf->cipher); + ret = -EINVAL; + } + + D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); + + return ret; +} + +/** + * il4965_alloc_bcast_station - add broadcast station into driver's station table. + * + * This adds the broadcast station into the driver's station table + * and marks it driver active, so that it will be restored to the + * device at the next best time. + */ +int +il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx) +{ + struct il_link_quality_cmd *link_cmd; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&il->sta_lock, flags); + sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL); + if (sta_id == IL_INVALID_STATION) { + IL_ERR("Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return -EINVAL; + } + + il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; + il->stations[sta_id].used |= IL_STA_BCAST; + spin_unlock_irqrestore(&il->sta_lock, flags); + + link_cmd = il4965_sta_alloc_lq(il, sta_id); + if (!link_cmd) { + IL_ERR + ("Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&il->sta_lock, flags); + + return 0; +} + +/** + * il4965_update_bcast_station - update broadcast station's LQ command + * + * Only used by iwl4965. Placed here to have all bcast station management + * code together. 
+ */ +static int +il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx) +{ + unsigned long flags; + struct il_link_quality_cmd *link_cmd; + u8 sta_id = ctx->bcast_sta_id; + + link_cmd = il4965_sta_alloc_lq(il, sta_id); + if (!link_cmd) { + IL_ERR("Unable to initialize rate scaling for bcast sta.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&il->sta_lock, flags); + if (il->stations[sta_id].lq) + kfree(il->stations[sta_id].lq); + else + D_INFO("Bcast sta rate scaling has not been initialized.\n"); + il->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&il->sta_lock, flags); + + return 0; +} + +int +il4965_update_bcast_stations(struct il_priv *il) +{ + return il4965_update_bcast_station(il, &il->ctx); +} + +/** + * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table + */ +int +il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid) +{ + unsigned long flags; + struct il_addsta_cmd sta_cmd; + + lockdep_assert_held(&il->mutex); + + /* Remove "disable" flag, to enable Tx for this TID */ + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +int +il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid, + u16 ssn) +{ + unsigned long flags; + int sta_id; + struct il_addsta_cmd sta_cmd; + + lockdep_assert_held(&il->mutex); + + sta_id = il_sta_id(sta); + if (sta_id == IL_INVALID_STATION) + return -ENXIO; + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].sta.station_flags_msk = 0; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid; + il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +int +il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid) +{ + unsigned long flags; + int sta_id; + struct il_addsta_cmd sta_cmd; + + lockdep_assert_held(&il->mutex); + + sta_id = il_sta_id(sta); + if (sta_id == IL_INVALID_STATION) { + IL_ERR("Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].sta.station_flags_msk = 0; + il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid; + il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_add_sta(il, &sta_cmd, CMD_SYNC); +} + +void +il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&il->sta_lock, flags); + il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; + il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + il->stations[sta_id].sta.sta.modify_mask = + STA_MODIFY_SLEEP_TX_COUNT_MSK; + il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); + 
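+ /* CMD_ASYNC is assumed to be sufficient here: the PS-poll path above
+ * only relies on this command being queued ahead of the next frame, not
+ * on waiting for its completion. */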
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&il->sta_lock, flags); + +} + +void +il4965_update_chain_flags(struct il_priv *il) +{ + if (il->cfg->ops->hcmd->set_rxon_chain) { + il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx); + if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain) + il_commit_rxon(il, &il->ctx); + } +} + +static void +il4965_clear_free_frames(struct il_priv *il) +{ + struct list_head *element; + + D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); + + while (!list_empty(&il->free_frames)) { + element = il->free_frames.next; + list_del(element); + kfree(list_entry(element, struct il_frame, list)); + il->frames_count--; + } + + if (il->frames_count) { + IL_WARN("%d frames still in use. Did we lose one?\n", + il->frames_count); + il->frames_count = 0; + } +} + +static struct il_frame * +il4965_get_free_frame(struct il_priv *il) +{ + struct il_frame *frame; + struct list_head *element; + if (list_empty(&il->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IL_ERR("Could not allocate frame!\n"); + return NULL; + } + + il->frames_count++; + return frame; + } + + element = il->free_frames.next; + list_del(element); + return list_entry(element, struct il_frame, list); +} + +static void +il4965_free_frame(struct il_priv *il, struct il_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &il->free_frames); +} + +static u32 +il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, + int left) +{ + lockdep_assert_held(&il->mutex); + + if (!il->beacon_skb) + return 0; + + if (il->beacon_skb->len > left) + return 0; + + memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); + + return il->beacon_skb->len; +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void +il4965_set_beacon_tim(struct il_priv *il, + struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon, + u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The idx is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx + 1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx + 1]; + } else + IL_WARN("Unable to find TIM Element in beacon\n"); +} + +static unsigned int +il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame) +{ + struct il_tx_beacon_cmd *tx_beacon_cmd; + u32 frame_size; + u32 rate_flags; + u32 rate; + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. 
+ */ + + lockdep_assert_held(&il->mutex); + + if (!il->beacon_ctx) { + IL_ERR("trying to build beacon w/o beacon context!\n"); + return 0; + } + + /* Initialize memory */ + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + /* Set up TX beacon contents */ + frame_size = + il4965_fill_beacon_frame(il, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) + return 0; + if (!frame_size) + return 0; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size); + tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = + TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK | + TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame, + frame_size); + + /* Set up packet rate and flags */ + rate = il_get_lowest_plcp(il, il->beacon_ctx); + il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); + rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; + if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags); + + return sizeof(*tx_beacon_cmd) + frame_size; +} + +int +il4965_send_beacon_cmd(struct il_priv *il) +{ + struct il_frame *frame; + unsigned int frame_size; + int rc; + + frame = il4965_get_free_frame(il); + if (!frame) { + IL_ERR("Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + frame_size = il4965_hw_get_beacon_cmd(il, frame); + if (!frame_size) { + IL_ERR("Error configuring the beacon command\n"); + il4965_free_frame(il, frame); + return -EINVAL; + } + + rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); + + il4965_free_frame(il, frame); + + return rc; +} + +static inline dma_addr_t +il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx) +{ + struct il_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << + 16; + + return addr; +} + +static inline u16 +il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx) +{ + struct il_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void +il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len) +{ + struct il_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 +il4965_tfd_get_num_tbs(struct il_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +/** + * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @il - driver ilate data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write idxes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void +il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) +{ + struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds; + struct il_tfd *tfd; + struct pci_dev *dev = il->pci_dev; + int idx = txq->q.read_ptr; + int i; + int num_tbs; + + tfd = &tfd_tmp[idx]; + + /* Sanity check on number of chunks */ + num_tbs = il4965_tfd_get_num_tbs(tfd); + + if (num_tbs >= 
IL_NUM_OF_TBS) { + IL_ERR("Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), + dma_unmap_len(&txq->meta[idx], len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) + pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i), + il4965_tfd_tb_get_len(tfd, i), + PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +int +il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad) +{ + struct il_queue *q; + struct il_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = (struct il_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = il4965_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IL_NUM_OF_TBS) { + IL_ERR("Error can not send more than %d chunks\n", + IL_NUM_OF_TBS); + return -EINVAL; + } + + BUG_ON(addr & ~DMA_BIT_MASK(36)); + if (unlikely(addr & ~IL_TX_DMA_MASK)) + IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr); + + il4965_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. + */ +int +il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8); + + return 0; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void +il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + D_INFO("Initialization Alive received.\n"); + memcpy(&il->card_alive_init, &pkt->u.alive_frame, + sizeof(struct il_init_alive_resp)); + pwork = &il->init_alive_start; + } else { + D_INFO("Runtime Alive received.\n"); + memcpy(&il->card_alive, &pkt->u.alive_frame, + sizeof(struct il_alive_resp)); + pwork = &il->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); + else + IL_WARN("uCode did not respond OK.\n"); +} + +/** + * il4965_bg_stats_periodic - Timer callback to queue stats + * + * This callback is provided in order to send a stats request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last N_STATS + * was received. 
We need to ensure we receive the stats in order + * to update the temperature used for calibrating the TXPOWER. + */ +static void +il4965_bg_stats_periodic(unsigned long data) +{ + struct il_priv *il = (struct il_priv *)data; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!il_is_ready_rf(il)) + return; + + il_send_stats_request(il, CMD_ASYNC, false); +} + +static void +il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il4965_beacon_notif *beacon = + (struct il4965_beacon_notif *)pkt->u.raw; +#ifdef CONFIG_IWLEGACY_DEBUG + u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); +#endif + il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +static void +il4965_perform_ct_kill_task(struct il_priv *il) +{ + unsigned long flags; + + D_POWER("Stop all queues\n"); + + if (il->mac80211_registered) + ieee80211_stop_queues(il->hw); + + _il_wr(il, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + _il_rd(il, CSR_UCODE_DRV_GP1); + + spin_lock_irqsave(&il->reg_lock, flags); + if (!_il_grab_nic_access(il)) + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, flags); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void +il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = il->status; + + D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) { + + _il_wr(il, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + il_wr(il, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + } + + if (flags & CT_CARD_DISABLED) + il4965_perform_ct_kill_task(il); + + if (flags & HW_CARD_DISABLED) + set_bit(S_RF_KILL_HW, &il->status); + else + clear_bit(S_RF_KILL_HW, &il->status); + + if (!(flags & RXON_CARD_DISABLED)) + il_scan_cancel(il); + + if ((test_bit(S_RF_KILL_HW, &status) != + test_bit(S_RF_KILL_HW, &il->status))) + wiphy_rfkill_set_hw_state(il->hw->wiphy, + test_bit(S_RF_KILL_HW, &il->status)); + else + wake_up(&il->wait_command_queue); +} + +/** + * il4965_setup_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. 
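+ * (Notifications that arrive without an entry in il->handlers[] are just
+ * logged by il4965_rx_handle() and otherwise ignored.)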
+ */ +static void +il4965_setup_handlers(struct il_priv *il) +{ + il->handlers[N_ALIVE] = il4965_hdl_alive; + il->handlers[N_ERROR] = il_hdl_error; + il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; + il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; + il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; + il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; + il->handlers[N_BEACON] = il4965_hdl_beacon; + + /* + * The same handler is used for both the REPLY to a discrete + * stats request from the host as well as for the periodic + * stats notifications (after received beacons) from the uCode. + */ + il->handlers[C_STATS] = il4965_hdl_c_stats; + il->handlers[N_STATS] = il4965_hdl_stats; + + il_setup_rx_scan_handlers(il); + + /* status change handler */ + il->handlers[N_CARD_STATE] = il4965_hdl_card_state; + + il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon; + /* Rx handlers */ + il->handlers[N_RX_PHY] = il4965_hdl_rx_phy; + il->handlers[N_RX_MPDU] = il4965_hdl_rx; + /* block ack */ + il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba; + /* Set up hardware specific Rx handlers */ + il->cfg->ops->lib->handler_setup(il); +} + +/** + * il4965_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the il->handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +void +il4965_rx_handle(struct il_priv *il) +{ + struct il_rx_buf *rxb; + struct il_rx_pkt *pkt; + struct il_rx_queue *rxq = &il->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read idx (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). */ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + D_RX("r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(il->pci_dev, rxb->page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) && + (pkt->hdr.cmd != N_RX_MPDU) && + (pkt->hdr.cmd != N_COMPRESSED_BA) && + (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * handlers table. 
See il4965_setup_handlers() */ + if (il->handlers[pkt->hdr.cmd]) { + D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, + il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + il->isr_stats.handlers[pkt->hdr.cmd]++; + il->handlers[pkt->hdr.cmd] (il, rxb); + } else { + /* No handling needed */ + D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, + i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking il_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + il_tx_cmd_complete(il, rxb); + else + IL_WARN("Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = + pci_map_page(il->pci_dev, rxb->page, 0, + PAGE_SIZE << il->hw_params. + rx_page_order, PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. */ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + il4965_rx_replenish_now(il); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + il4965_rx_replenish_now(il); + else + il4965_rx_queue_restock(il); +} + +/* call this function to flush any scheduled tasklet */ +static inline void +il4965_synchronize_irq(struct il_priv *il) +{ + /* wait to make sure we flush pending tasklet */ + synchronize_irq(il->pci_dev->irq); + tasklet_kill(&il->irq_tasklet); +} + +static void +il4965_irq_tasklet(struct il_priv *il) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&il->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = _il_rd(il, CSR_INT); + _il_wr(il, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = _il_rd(il, CSR_FH_INT_STATUS); + _il_wr(il, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & IL_DL_ISR) { + /* just for debug */ + inta_mask = _il_rd(il, CSR_INT_MASK); + D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, + inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&il->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR49_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR49_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IL_ERR("Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + il_disable_interrupts(il); + + il->isr_stats.hw++; + il_irq_handle_error(il); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & (IL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + D_ISR("Scheduler finished to transmit " + "the frame/frames.\n"); + il->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + D_ISR("Alive interrupt\n"); + il->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (! + (_il_rd(il, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IL_WARN("RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + il->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(S_ALIVE, &il->status)) { + if (hw_rf_kill) + set_bit(S_RF_KILL_HW, &il->status); + else + clear_bit(S_RF_KILL_HW, &il->status); + wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IL_ERR("Microcode CT kill error detected.\n"); + il->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n", + inta); + il->isr_stats.sw++; + il_irq_handle_error(il); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* + * uCode wakes up after power-down sleep. + * Tell device about any new tx or host commands enqueued, + * and about any Rx buffers made available while asleep. 
+ */ + if (inta & CSR_INT_BIT_WAKEUP) { + D_ISR("Wakeup interrupt\n"); + il_rx_queue_update_write_ptr(il, &il->rxq); + for (i = 0; i < il->hw_params.max_txq_num; i++) + il_txq_update_write_ptr(il, &il->txq[i]); + il->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + il4965_rx_handle(il); + il->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + D_ISR("uCode load interrupt\n"); + il->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + il->ucode_write_complete = 1; + wake_up(&il->wait_command_queue); + } + + if (inta & ~handled) { + IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + il->isr_stats.unhandled++; + } + + if (inta & ~(il->inta_mask)) { + IL_WARN("Disabled INTA bits 0x%08x were pending\n", + inta & ~il->inta_mask); + IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(S_INT_ENABLED, &il->status)) + il_enable_interrupts(il); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + il_enable_rfkill_int(il); + +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & (IL_DL_ISR)) { + inta = _il_rd(il, CSR_INT); + inta_mask = _il_rd(il, CSR_INT_MASK); + inta_fh = _il_rd(il, CSR_FH_INT_STATUS); + D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. 
+ */ +static ssize_t +il4965_show_debug_level(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); +} + +static ssize_t +il4965_store_debug_level(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IL_ERR("%s is not in hex or decimal form.\n", buf); + else { + il->debug_level = val; + if (il_alloc_traffic_mem(il)) + IL_ERR("Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level, + il4965_store_debug_level); + +#endif /* CONFIG_IWLEGACY_DEBUG */ + +static ssize_t +il4965_show_temperature(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + + if (!il_is_alive(il)) + return -EAGAIN; + + return sprintf(buf, "%d\n", il->temperature); +} + +static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL); + +static ssize_t +il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) +{ + struct il_priv *il = dev_get_drvdata(d); + + if (!il_is_ready_rf(il)) + return sprintf(buf, "off\n"); + else + return sprintf(buf, "%d\n", il->tx_power_user_lmt); +} + +static ssize_t +il4965_store_tx_power(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct il_priv *il = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IL_INFO("%s is not in decimal form.\n", buf); + else { + ret = il_set_tx_power(il, val, false); + if (ret) + IL_ERR("failed setting tx power (0x%d).\n", ret); + else + ret = count; + } + return ret; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power, + il4965_store_tx_power); + +static struct attribute *il_sysfs_entries[] = { + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group il_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = il_sysfs_entries, +}; + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void +il4965_dealloc_ucode_pci(struct il_priv *il) +{ + il_free_fw_desc(il->pci_dev, &il->ucode_code); + il_free_fw_desc(il->pci_dev, &il->ucode_data); + il_free_fw_desc(il->pci_dev, &il->ucode_data_backup); + il_free_fw_desc(il->pci_dev, &il->ucode_init); + il_free_fw_desc(il->pci_dev, &il->ucode_init_data); + il_free_fw_desc(il->pci_dev, &il->ucode_boot); +} + +static void +il4965_nic_start(struct il_priv *il) +{ + /* Remove all resets to allow NIC to operate */ + _il_wr(il, CSR_RESET, 0); +} + +static void il4965_ucode_callback(const struct firmware *ucode_raw, + void *context); +static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length); + +static int __must_check +il4965_request_firmware(struct il_priv *il, bool first) +{ + const char *name_pre = il->cfg->fw_name_pre; + char tag[8]; + + if (first) { + il->fw_idx = il->cfg->ucode_api_max; + sprintf(tag, "%d", il->fw_idx); + } else { + il->fw_idx--; + sprintf(tag, "%d", il->fw_idx); + } + + if (il->fw_idx < il->cfg->ucode_api_min) { + IL_ERR("no 
suitable firmware found!\n"); + return -ENOENT; + } + + sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); + + D_INFO("attempting to load firmware '%s'\n", il->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name, + &il->pci_dev->dev, GFP_KERNEL, il, + il4965_ucode_callback); +} + +struct il4965_firmware_pieces { + const void *inst, *data, *init, *init_data, *boot; + size_t inst_size, data_size, init_size, init_data_size, boot_size; +}; + +static int +il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw, + struct il4965_firmware_pieces *pieces) +{ + struct il_ucode_header *ucode = (void *)ucode_raw->data; + u32 api_ver, hdr_size; + const u8 *src; + + il->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IL_UCODE_API(il->ucode_ver); + + switch (api_ver) { + default: + case 0: + case 1: + case 2: + hdr_size = 24; + if (ucode_raw->size < hdr_size) { + IL_ERR("File size too small!\n"); + return -EINVAL; + } + pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); + pieces->data_size = le32_to_cpu(ucode->v1.data_size); + pieces->init_size = le32_to_cpu(ucode->v1.init_size); + pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size); + pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); + src = ucode->v1.data; + break; + } + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != + hdr_size + pieces->inst_size + pieces->data_size + + pieces->init_size + pieces->init_data_size + pieces->boot_size) { + + IL_ERR("uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + return -EINVAL; + } + + pieces->inst = src; + src += pieces->inst_size; + pieces->data = src; + src += pieces->data_size; + pieces->init = src; + src += pieces->init_size; + pieces->init_data = src; + src += pieces->init_data_size; + pieces->boot = src; + src += pieces->boot_size; + + return 0; +} + +/** + * il4965_ucode_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). + */ +static void +il4965_ucode_callback(const struct firmware *ucode_raw, void *context) +{ + struct il_priv *il = context; + struct il_ucode_header *ucode; + int err; + struct il4965_firmware_pieces pieces; + const unsigned int api_max = il->cfg->ucode_api_max; + const unsigned int api_min = il->cfg->ucode_api_min; + u32 api_ver; + + u32 max_probe_length = 200; + u32 standard_phy_calibration_size = + IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; + + memset(&pieces, 0, sizeof(pieces)); + + if (!ucode_raw) { + if (il->fw_idx <= il->cfg->ucode_api_max) + IL_ERR("request for firmware file '%s' failed.\n", + il->firmware_name); + goto try_again; + } + + D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name, + ucode_raw->size); + + /* Make sure that we got at least the API version number */ + if (ucode_raw->size < 4) { + IL_ERR("File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct il_ucode_header *)ucode_raw->data; + + err = il4965_load_firmware(il, ucode_raw, &pieces); + + if (err) + goto try_again; + + api_ver = IL_UCODE_API(il->ucode_ver); + + /* + * api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward + */ + if (api_ver < api_min || api_ver > api_max) { + IL_ERR("Driver unable to support your firmware API. 
" + "Driver supports v%u, firmware is v%u.\n", api_max, + api_ver); + goto try_again; + } + + if (api_ver != api_max) + IL_ERR("Firmware has old API version. Expected v%u, " + "got v%u. New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", api_max, + api_ver); + + IL_INFO("loaded firmware version %u.%u.%u.%u\n", + IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), + IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); + + snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), + "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), + IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), + IL_UCODE_SERIAL(il->ucode_ver)); + + /* + * For any of the failures below (before allocating pci memory) + * we will try to load a version with a smaller API -- maybe the + * user just got a corrupted version of the latest API. + */ + + D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); + D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size); + D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size); + D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size); + D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size); + D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size); + + /* Verify that uCode images will fit in card's SRAM */ + if (pieces.inst_size > il->hw_params.max_inst_size) { + IL_ERR("uCode instr len %Zd too large to fit in\n", + pieces.inst_size); + goto try_again; + } + + if (pieces.data_size > il->hw_params.max_data_size) { + IL_ERR("uCode data len %Zd too large to fit in\n", + pieces.data_size); + goto try_again; + } + + if (pieces.init_size > il->hw_params.max_inst_size) { + IL_ERR("uCode init instr len %Zd too large to fit in\n", + pieces.init_size); + goto try_again; + } + + if (pieces.init_data_size > il->hw_params.max_data_size) { + IL_ERR("uCode init data len %Zd too large to fit in\n", + pieces.init_data_size); + goto try_again; + } + + if (pieces.boot_size > il->hw_params.max_bsm_size) { + IL_ERR("uCode boot instr len %Zd too large to fit in\n", + pieces.boot_size); + goto try_again; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + il->ucode_code.len = pieces.inst_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_code); + + il->ucode_data.len = pieces.data_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_data); + + il->ucode_data_backup.len = pieces.data_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); + + if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || + !il->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (pieces.init_size && pieces.init_data_size) { + il->ucode_init.len = pieces.init_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_init); + + il->ucode_init_data.len = pieces.init_data_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); + + if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (pieces.boot_size) { + il->ucode_boot.len = pieces.boot_size; + il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); + + if (!il->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Now that we can no longer fail, copy information */ + + il->sta_key_max_num = STA_KEY_MAX_NUM; + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + D_INFO("Copying (but not loading) uCode instr len %Zd\n", + pieces.inst_size); + memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size); + + D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); + + /* + * Runtime data + * NOTE: Copy into backup buffer will be done in il_up() + */ + D_INFO("Copying (but not loading) uCode data len %Zd\n", + pieces.data_size); + memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size); + memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size); + + /* Initialization instructions */ + if (pieces.init_size) { + D_INFO("Copying (but not loading) init instr len %Zd\n", + pieces.init_size); + memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size); + } + + /* Initialization data */ + if (pieces.init_data_size) { + D_INFO("Copying (but not loading) init data len %Zd\n", + pieces.init_data_size); + memcpy(il->ucode_init_data.v_addr, pieces.init_data, + pieces.init_data_size); + } + + /* Bootstrap instructions */ + D_INFO("Copying (but not loading) boot instr len %Zd\n", + pieces.boot_size); + memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size); + + /* + * figure out the offset of chain noise reset and gain commands + * base on the size of standard phy calibration commands table size + */ + il->_4965.phy_calib_chain_noise_reset_cmd = + standard_phy_calibration_size; + il->_4965.phy_calib_chain_noise_gain_cmd = + standard_phy_calibration_size + 1; + + /************************************************** + * This is still part of probe() in a sense... + * + * 9. Setup and register with mac80211 and debugfs + **************************************************/ + err = il4965_mac_setup_register(il, max_probe_length); + if (err) + goto out_unbind; + + err = il_dbgfs_register(il, DRV_NAME); + if (err) + IL_ERR("failed to create debugfs files. 
Ignoring error: %d\n", + err); + + err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group); + if (err) { + IL_ERR("failed to create sysfs device attributes\n"); + goto out_unbind; + } + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + complete(&il->_4965.firmware_loading_complete); + return; + +try_again: + /* try next, if any */ + if (il4965_request_firmware(il, false)) + goto out_unbind; + release_firmware(ucode_raw); + return; + +err_pci_alloc: + IL_ERR("failed to allocate pci memory\n"); + il4965_dealloc_ucode_pci(il); +out_unbind: + complete(&il->_4965.firmware_loading_complete); + device_release_driver(&il->pci_dev->dev); + release_firmware(ucode_raw); +} + +static const char *const desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STBL", + "FH49_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT", + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", +}; + +static struct { + char *name; + u8 num; +} advanced_lookup[] = { + { + "NMI_INTERRUPT_WDG", 0x34}, { + "SYSASSERT", 0x35}, { + "UCODE_VERSION_MISMATCH", 0x37}, { + "BAD_COMMAND", 0x38}, { + "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, { + "FATAL_ERROR", 0x3D}, { + "NMI_TRM_HW_ERR", 0x46}, { + "NMI_INTERRUPT_TRM", 0x4C}, { + "NMI_INTERRUPT_BREAK_POINT", 0x54}, { + "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, { + "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, { + "NMI_INTERRUPT_HOST", 0x66}, { + "NMI_INTERRUPT_ACTION_PT", 0x7C}, { + "NMI_INTERRUPT_UNKNOWN", 0x84}, { + "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, { +"ADVANCED_SYSASSERT", 0},}; + +static const char * +il4965_desc_lookup(u32 num) +{ + int i; + int max = ARRAY_SIZE(desc_lookup_text); + + if (num < max) + return desc_lookup_text[num]; + + max = ARRAY_SIZE(advanced_lookup) - 1; + for (i = 0; i < max; i++) { + if (advanced_lookup[i].num == num) + break; + } + return advanced_lookup[i].name; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void +il4965_dump_nic_error_log(struct il_priv *il) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + u32 pc, hcmd; + + if (il->ucode_type == UCODE_INIT) + base = le32_to_cpu(il->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(il->card_alive.error_event_table_ptr); + + if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n", + base, (il->ucode_type == UCODE_INIT) ? 
"Init" : "RT"); + return; + } + + count = il_read_targ_mem(il, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IL_ERR("Start IWL Error Log Dump:\n"); + IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); + } + + desc = il_read_targ_mem(il, base + 1 * sizeof(u32)); + il->isr_stats.err_code = desc; + pc = il_read_targ_mem(il, base + 2 * sizeof(u32)); + blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32)); + blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32)); + ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32)); + ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32)); + data1 = il_read_targ_mem(il, base + 7 * sizeof(u32)); + data2 = il_read_targ_mem(il, base + 8 * sizeof(u32)); + line = il_read_targ_mem(il, base + 9 * sizeof(u32)); + time = il_read_targ_mem(il, base + 11 * sizeof(u32)); + hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32)); + + IL_ERR("Desc Time " + "data1 data2 line\n"); + IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", + il4965_desc_lookup(desc), desc, time, data1, data2, line); + IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n"); + IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1, + blink2, ilink1, ilink2, hcmd); +} + +static void +il4965_rf_kill_ct_config(struct il_priv *il) +{ + struct il_ct_kill_config cmd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&il->lock, flags); + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&il->lock, flags); + + cmd.critical_temperature_R = + cpu_to_le32(il->hw_params.ct_kill_threshold); + + ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd); + if (ret) + IL_ERR("C_CT_KILL_CONFIG failed\n"); + else + D_INFO("C_CT_KILL_CONFIG " "succeeded, " + "critical temperature is %d\n", + il->hw_params.ct_kill_threshold); +} + +static const s8 default_queue_to_tx_fifo[] = { + IL_TX_FIFO_VO, + IL_TX_FIFO_VI, + IL_TX_FIFO_BE, + IL_TX_FIFO_BK, + IL49_CMD_FIFO_NUM, + IL_TX_FIFO_UNUSED, + IL_TX_FIFO_UNUSED, +}; + +#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) + +static int +il4965_alive_notify(struct il_priv *il) +{ + u32 a; + unsigned long flags; + int i, chan; + u32 reg_val; + + spin_lock_irqsave(&il->lock, flags); + + /* Clear 4965's internal Tx Scheduler data base */ + il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR); + a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET; + for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) + il_write_targ_mem(il, a, 0); + for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) + il_write_targ_mem(il, a, 0); + for (; + a < + il->scd_base_addr + + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num); + a += 4) + il_write_targ_mem(il, a, 0); + + /* Tel 4965 where to find Tx byte count tables */ + il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++) + il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan), + FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG); + il_wr(il, FH49_TX_CHICKEN_BITS_REG, + reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Disable chain mode for all queues */ + il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0); + + /* Initialize each Tx queue (including the command queue) */ + for (i = 0; i < il->hw_params.max_txq_num; i++) { + + /* TFD circular buffer read/write idxes */ + 
il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0); + il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8)); + + /* Max Tx Window size for Scheduler-ACK mode */ + il_write_targ_mem(il, + il->scd_base_addr + + IL49_SCD_CONTEXT_QUEUE_OFFSET(i), + (SCD_WIN_SIZE << + IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + /* Frame limit */ + il_write_targ_mem(il, + il->scd_base_addr + + IL49_SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + (SCD_FRAME_LIMIT << + IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + } + il_wr_prph(il, IL49_SCD_INTERRUPT_MASK, + (1 << il->hw_params.max_txq_num) - 1); + + /* Activate all Tx DMA/FIFO channels */ + il4965_txq_set_sched(il, IL_MASK(0, 6)); + + il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0); + + /* make sure all queue are not stopped */ + memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped)); + for (i = 0; i < 4; i++) + atomic_set(&il->queue_stop_count[i], 0); + + /* reset to 0 to enable all the queue first */ + il->txq_ctx_active_msk = 0; + /* Map each Tx/cmd queue to its corresponding fifo */ + BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); + + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { + int ac = default_queue_to_tx_fifo[i]; + + il_txq_ctx_activate(il, i); + + if (ac == IL_TX_FIFO_UNUSED) + continue; + + il4965_tx_queue_set_status(il, &il->txq[i], ac, 0); + } + + spin_unlock_irqrestore(&il->lock, flags); + + return 0; +} + +/** + * il4965_alive_start - called after N_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by il_init_alive_start()). + */ +static void +il4965_alive_start(struct il_priv *il) +{ + int ret = 0; + struct il_rxon_context *ctx = &il->ctx; + + D_INFO("Runtime Alive received.\n"); + + if (il->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + D_INFO("Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (il4965_verify_ucode(il)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + D_INFO("Bad runtime uCode load.\n"); + goto restart; + } + + ret = il4965_alive_notify(il); + if (ret) { + IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret); + goto restart; + } + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(S_ALIVE, &il->status); + + /* Enable watchdog to monitor the driver tx queues */ + il_setup_watchdog(il); + + if (il_is_rfkill(il)) + return; + + ieee80211_wake_queues(il->hw); + + il->active_rate = RATES_MASK; + + if (il_is_associated_ctx(ctx)) { + struct il_rxon_cmd *active_rxon = + (struct il_rxon_cmd *)&ctx->active; + /* apply any changes in staging */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + il_connection_init_rx_config(il, &il->ctx); + + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, ctx); + } + + /* Configure bluetooth coexistence if enabled */ + il_send_bt_config(il); + + il4965_reset_run_time_calib(il); + + set_bit(S_READY, &il->status); + + /* Configure the adapter for unassociated operation */ + il_commit_rxon(il, ctx); + + /* At this point, the NIC is initialized and operational */ + il4965_rf_kill_ct_config(il); + + D_INFO("ALIVE processing complete.\n"); + wake_up(&il->wait_command_queue); + + il_power_update_mode(il, true); + D_INFO("Updated power mode\n"); + + return; + +restart: + queue_work(il->workqueue, &il->restart); +} + +static void il4965_cancel_deferred_work(struct il_priv *il); + +static void +__il4965_down(struct il_priv *il) +{ + unsigned long flags; + int exit_pending; + + D_INFO(DRV_NAME " is going down\n"); + + il_scan_cancel_timeout(il, 200); + + exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); + + /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&il->watchdog); + + il_clear_ucode_stations(il, NULL); + il_dealloc_bcast_stations(il); + il_clear_driver_stations(il); + + /* Unblock any waiting calls */ + wake_up_all(&il->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(S_EXIT_PENDING, &il->status); + + /* stop and reset the on-board processor */ + _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&il->lock, flags); + il_disable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + il4965_synchronize_irq(il); + + if (il->mac80211_registered) + ieee80211_stop_queues(il->hw); + + /* If we have not previously called il_init() then + * clear all bits but the RF Kill bit and return */ + if (!il_is_init(il)) { + il->status = + test_bit(S_RF_KILL_HW, + &il-> + status) << S_RF_KILL_HW | + test_bit(S_GEO_CONFIGURED, + &il-> + status) << S_GEO_CONFIGURED | + test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. 
*/ + il->status &= + test_bit(S_RF_KILL_HW, + &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED, + &il-> + status) << + S_GEO_CONFIGURED | test_bit(S_FW_ERROR, + &il-> + status) << S_FW_ERROR | + test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; + + il4965_txq_ctx_stop(il); + il4965_rxq_stop(il); + + /* Power-down device's busmaster DMA clocks */ + il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Make sure (redundant) we've released our request to stay awake */ + il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + il_apm_stop(il); + +exit: + memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); + + dev_kfree_skb(il->beacon_skb); + il->beacon_skb = NULL; + + /* clear out any free frames */ + il4965_clear_free_frames(il); +} + +static void +il4965_down(struct il_priv *il) +{ + mutex_lock(&il->mutex); + __il4965_down(il); + mutex_unlock(&il->mutex); + + il4965_cancel_deferred_work(il); +} + +#define HW_READY_TIMEOUT (50) + +static int +il4965_set_hw_ready(struct il_priv *il) +{ + int ret = 0; + + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = + _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); + if (ret != -ETIMEDOUT) + il->hw_ready = true; + else + il->hw_ready = false; + + D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready"); + return ret; +} + +static int +il4965_prepare_card_hw(struct il_priv *il) +{ + int ret = 0; + + D_INFO("il4965_prepare_card_hw enter\n"); + + ret = il4965_set_hw_ready(il); + if (il->hw_ready) + return ret; + + /* If HW is not ready, prepare the conditions to check again */ + il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = + _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + /* HW should be ready by now, check again. 
*/ + if (ret != -ETIMEDOUT) + il4965_set_hw_ready(il); + + return ret; +} + +#define MAX_HW_RESTARTS 5 + +static int +__il4965_up(struct il_priv *il) +{ + int i; + int ret; + + if (test_bit(S_EXIT_PENDING, &il->status)) { + IL_WARN("Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { + IL_ERR("ucode not available for device bringup\n"); + return -EIO; + } + + ret = il4965_alloc_bcast_station(il, &il->ctx); + if (ret) { + il_dealloc_bcast_stations(il); + return ret; + } + + il4965_prepare_card_hw(il); + + if (!il->hw_ready) { + IL_WARN("Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(S_RF_KILL_HW, &il->status); + else + set_bit(S_RF_KILL_HW, &il->status); + + if (il_is_rfkill(il)) { + wiphy_rfkill_set_hw_state(il->hw->wiphy, true); + + il_enable_interrupts(il); + IL_WARN("Radio disabled by HW RF Kill switch\n"); + return 0; + } + + _il_wr(il, CSR_INT, 0xFFFFFFFF); + + /* must be initialised before il_hw_nic_init */ + il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM; + + ret = il4965_hw_nic_init(il); + if (ret) { + IL_ERR("Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + _il_wr(il, CSR_INT, 0xFFFFFFFF); + il_enable_interrupts(il); + + /* really make sure rfkill handshake bits are cleared */ + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
*/ + memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, + il->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + ret = il->cfg->ops->lib->load_ucode(il); + + if (ret) { + IL_ERR("Unable to set up bootstrap uCode: %d\n", ret); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + il4965_nic_start(il); + + D_INFO(DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(S_EXIT_PENDING, &il->status); + __il4965_down(il); + clear_bit(S_EXIT_PENDING, &il->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IL_ERR("Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void +il4965_bg_init_alive_start(struct work_struct *data) +{ + struct il_priv *il = + container_of(data, struct il_priv, init_alive_start.work); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) + goto out; + + il->cfg->ops->lib->init_alive_start(il); +out: + mutex_unlock(&il->mutex); +} + +static void +il4965_bg_alive_start(struct work_struct *data) +{ + struct il_priv *il = + container_of(data, struct il_priv, alive_start.work); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) + goto out; + + il4965_alive_start(il); +out: + mutex_unlock(&il->mutex); +} + +static void +il4965_bg_run_time_calib_work(struct work_struct *work) +{ + struct il_priv *il = container_of(work, struct il_priv, + run_time_calib_work); + + mutex_lock(&il->mutex); + + if (test_bit(S_EXIT_PENDING, &il->status) || + test_bit(S_SCANNING, &il->status)) { + mutex_unlock(&il->mutex); + return; + } + + if (il->start_calib) { + il4965_chain_noise_calibration(il, (void *)&il->_4965.stats); + il4965_sensitivity_calibration(il, (void *)&il->_4965.stats); + } + + mutex_unlock(&il->mutex); +} + +static void +il4965_bg_restart(struct work_struct *data) +{ + struct il_priv *il = container_of(data, struct il_priv, restart); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + if (test_and_clear_bit(S_FW_ERROR, &il->status)) { + mutex_lock(&il->mutex); + il->ctx.vif = NULL; + il->is_open = 0; + + __il4965_down(il); + + mutex_unlock(&il->mutex); + il4965_cancel_deferred_work(il); + ieee80211_restart_hw(il->hw); + } else { + il4965_down(il); + + mutex_lock(&il->mutex); + if (test_bit(S_EXIT_PENDING, &il->status)) { + mutex_unlock(&il->mutex); + return; + } + + __il4965_up(il); + mutex_unlock(&il->mutex); + } +} + +static void +il4965_bg_rx_replenish(struct work_struct *data) +{ + struct il_priv *il = container_of(data, struct il_priv, rx_replenish); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + mutex_lock(&il->mutex); + il4965_rx_replenish(il); + mutex_unlock(&il->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (4 * HZ) + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. 
+ */ +static int +il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) +{ + int ret; + struct ieee80211_hw *hw = il->hw; + + hw->rate_control_algorithm = "iwl-4965-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = + IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; + + if (il->cfg->sku & IL_SKU_N) + hw->flags |= + IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + hw->sta_data_size = sizeof(struct il_station_priv); + hw->vif_data_size = sizeof(struct il_vif_priv); + + hw->wiphy->interface_modes |= il->ctx.interface_modes; + hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes; + + hw->wiphy->flags |= + WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; + + /* + * For now, disable PS by default because it affects + * RX performance significantly. + */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL; + + if (il->bands[IEEE80211_BAND_2GHZ].n_channels) + il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &il->bands[IEEE80211_BAND_2GHZ]; + if (il->bands[IEEE80211_BAND_5GHZ].n_channels) + il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &il->bands[IEEE80211_BAND_5GHZ]; + + il_leds_init(il); + + ret = ieee80211_register_hw(il->hw); + if (ret) { + IL_ERR("Failed to register hw (error %d)\n", ret); + return ret; + } + il->mac80211_registered = 1; + + return 0; +} + +int +il4965_mac_start(struct ieee80211_hw *hw) +{ + struct il_priv *il = hw->priv; + int ret; + + D_MAC80211("enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&il->mutex); + ret = __il4965_up(il); + mutex_unlock(&il->mutex); + + if (ret) + return ret; + + if (il_is_rfkill(il)) + goto out; + + D_INFO("Start UP work done.\n"); + + /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
*/ + ret = wait_event_timeout(il->wait_command_queue, + test_bit(S_READY, &il->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(S_READY, &il->status)) { + IL_ERR("START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + return -ETIMEDOUT; + } + } + + il4965_led_enable(il); + +out: + il->is_open = 1; + D_MAC80211("leave\n"); + return 0; +} + +void +il4965_mac_stop(struct ieee80211_hw *hw) +{ + struct il_priv *il = hw->priv; + + D_MAC80211("enter\n"); + + if (!il->is_open) + return; + + il->is_open = 0; + + il4965_down(il); + + flush_workqueue(il->workqueue); + + /* User space software may expect getting rfkill changes + * even if interface is down */ + _il_wr(il, CSR_INT, 0xFFFFFFFF); + il_enable_rfkill_int(il); + + D_MAC80211("leave\n"); +} + +void +il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct il_priv *il = hw->priv; + + D_MACDUMP("enter\n"); + + D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (il4965_tx_skb(il, skb)) + dev_kfree_skb_any(skb); + + D_MACDUMP("leave\n"); +} + +void +il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 * phase1key) +{ + struct il_priv *il = hw->priv; + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + + D_MAC80211("enter\n"); + + il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32, + phase1key); + + D_MAC80211("leave\n"); +} + +int +il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct il_priv *il = hw->priv; + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + struct il_rxon_context *ctx = vif_priv->ctx; + int ret; + u8 sta_id; + bool is_default_wep_key = false; + + D_MAC80211("enter\n"); + + if (il->cfg->mod_params->sw_crypto) { + D_MAC80211("leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta); + if (sta_id == IL_INVALID_STATION) + return -EINVAL; + + mutex_lock(&il->mutex); + il_scan_cancel_timeout(il, 100); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. 
+ */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + (key->hw_key_idx == HW_KEY_DEFAULT); + } + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) + ret = + il4965_set_default_wep_key(il, vif_priv->ctx, key); + else + ret = + il4965_set_dynamic_key(il, vif_priv->ctx, key, + sta_id); + + D_MAC80211("enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = il4965_remove_default_wep_key(il, ctx, key); + else + ret = il4965_remove_dynamic_key(il, ctx, key, sta_id); + + D_MAC80211("disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&il->mutex); + D_MAC80211("leave\n"); + + return ret; +} + +int +il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 * ssn, + u8 buf_size) +{ + struct il_priv *il = hw->priv; + int ret = -EINVAL; + + D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid); + + if (!(il->cfg->sku & IL_SKU_N)) + return -EACCES; + + mutex_lock(&il->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + D_HT("start Rx\n"); + ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + D_HT("stop Rx\n"); + ret = il4965_sta_rx_agg_stop(il, sta, tid); + if (test_bit(S_EXIT_PENDING, &il->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_START: + D_HT("start Tx\n"); + ret = il4965_tx_agg_start(il, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP: + D_HT("stop Tx\n"); + ret = il4965_tx_agg_stop(il, vif, sta, tid); + if (test_bit(S_EXIT_PENDING, &il->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = 0; + break; + } + mutex_unlock(&il->mutex); + + return ret; +} + +int +il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct il_priv *il = hw->priv; + struct il_station_priv *sta_priv = (void *)sta->drv_priv; + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret; + u8 sta_id; + + D_INFO("received request to add station %pM\n", sta->addr); + mutex_lock(&il->mutex); + D_INFO("proceeding to add station %pM\n", sta->addr); + sta_priv->common.sta_id = IL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + + ret = + il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta, + &sta_id); + if (ret) { + IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); + /* Should we return success if return code is EEXIST ? 
*/ + mutex_unlock(&il->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + D_INFO("Initializing rate scaling for station %pM\n", sta->addr); + il4965_rs_rate_init(il, sta, sta_id); + mutex_unlock(&il->mutex); + + return 0; +} + +void +il4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct il_priv *il = hw->priv; + const struct il_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->channel; + struct il_ht_config *ht_conf = &il->current_ht_config; + + struct il_rxon_context *ctx = &il->ctx; + u16 ch; + + D_MAC80211("enter\n"); + + mutex_lock(&il->mutex); + + if (il_is_rfkill(il)) + goto out; + + if (test_bit(S_EXIT_PENDING, &il->status) || + test_bit(S_SCANNING, &il->status) || + test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) + goto out; + + if (!il_is_associated_ctx(ctx)) + goto out; + + if (!il->cfg->ops->lib->set_channel_switch) + goto out; + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) == ch) + goto out; + + ch_info = il_get_channel_info(il, channel->band, ch); + if (!il_is_channel_valid(ch_info)) { + D_MAC80211("invalid channel\n"); + goto out; + } + + spin_lock_irq(&il->lock); + + il->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + il_set_rxon_channel(il, channel, ctx); + il_set_rxon_ht(il, ht_conf); + il_set_flags_for_band(il, ctx, channel->band, ctx->vif); + + spin_unlock_irq(&il->lock); + + il_set_rate(il); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + set_bit(S_CHANNEL_SWITCH_PENDING, &il->status); + il->switch_channel = cpu_to_le16(ch); + if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) { + clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status); + il->switch_channel = 0; + ieee80211_chswitch_done(ctx->vif, false); + } + +out: + mutex_unlock(&il->mutex); + D_MAC80211("leave\n"); +} + +void +il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct il_priv *il = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags, + *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&il->mutex); + + il->ctx.staging.filter_flags &= ~filter_nand; + il->ctx.staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. 
+ */ + + mutex_unlock(&il->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in il_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= + FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void +il4965_bg_txpower_work(struct work_struct *work) +{ + struct il_priv *il = container_of(work, struct il_priv, + txpower_work); + + mutex_lock(&il->mutex); + + /* If a scan happened to start before we got here + * then just return; the stats notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(S_EXIT_PENDING, &il->status) || + test_bit(S_SCANNING, &il->status)) + goto out; + + /* Regardless of if we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + il->cfg->ops->lib->send_tx_power(il); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + il->last_temperature = il->temperature; +out: + mutex_unlock(&il->mutex); +} + +static void +il4965_setup_deferred_work(struct il_priv *il) +{ + il->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&il->wait_command_queue); + + INIT_WORK(&il->restart, il4965_bg_restart); + INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish); + INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work); + INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start); + INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start); + + il_setup_scan_deferred_work(il); + + INIT_WORK(&il->txpower_work, il4965_bg_txpower_work); + + init_timer(&il->stats_periodic); + il->stats_periodic.data = (unsigned long)il; + il->stats_periodic.function = il4965_bg_stats_periodic; + + init_timer(&il->watchdog); + il->watchdog.data = (unsigned long)il; + il->watchdog.function = il_bg_watchdog; + + tasklet_init(&il->irq_tasklet, + (void (*)(unsigned long))il4965_irq_tasklet, + (unsigned long)il); +} + +static void +il4965_cancel_deferred_work(struct il_priv *il) +{ + cancel_work_sync(&il->txpower_work); + cancel_delayed_work_sync(&il->init_alive_start); + cancel_delayed_work(&il->alive_start); + cancel_work_sync(&il->run_time_calib_work); + + il_cancel_scan_deferred_work(il); + + del_timer_sync(&il->stats_periodic); +} + +static void +il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = il_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on idxes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (il_rates[i].plcp == + RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +/* + * Acquire il->lock before calling this function ! 
+ */ +void +il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx) +{ + il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8)); + il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx); +} + +void +il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + + /* Find out whether to activate Tx queue */ + int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0; + + /* Set up and activate */ + il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id), + (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + IL49_SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + +static int +il4965_init_drv(struct il_priv *il) +{ + int ret; + + spin_lock_init(&il->sta_lock); + spin_lock_init(&il->hcmd_lock); + + INIT_LIST_HEAD(&il->free_frames); + + mutex_init(&il->mutex); + + il->ieee_channels = NULL; + il->ieee_rates = NULL; + il->band = IEEE80211_BAND_2GHZ; + + il->iw_mode = NL80211_IFTYPE_STATION; + il->current_ht_config.smps = IEEE80211_SMPS_STATIC; + il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; + + /* Choose which receivers/antennas to use */ + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx); + + il_init_scan_params(il); + + ret = il_init_channel_map(il); + if (ret) { + IL_ERR("initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = il_init_geos(il); + if (ret) { + IL_ERR("initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + il4965_init_hw_rates(il, il->ieee_rates); + + return 0; + +err_free_channel_map: + il_free_channel_map(il); +err: + return ret; +} + +static void +il4965_uninit_drv(struct il_priv *il) +{ + il4965_calib_free_results(il); + il_free_geos(il); + il_free_channel_map(il); + kfree(il->scan_cmd); +} + +static void +il4965_hw_detect(struct il_priv *il) +{ + il->hw_rev = _il_rd(il, CSR_HW_REV); + il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG); + il->rev_id = il->pci_dev->revision; + D_INFO("HW Revision ID = 0x%X\n", il->rev_id); +} + +static int +il4965_set_hw_params(struct il_priv *il) +{ + il->hw_params.max_rxq_size = RX_QUEUE_SIZE; + il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + if (il->cfg->mod_params->amsdu_size_8K) + il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K); + else + il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K); + + il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL; + + if (il->cfg->mod_params->disable_11n) + il->cfg->sku &= ~IL_SKU_N; + + /* Device-specific setup */ + return il->cfg->ops->lib->set_hw_params(il); +} + +static const u8 il4965_bss_ac_to_fifo[] = { + IL_TX_FIFO_VO, + IL_TX_FIFO_VI, + IL_TX_FIFO_BE, + IL_TX_FIFO_BK, +}; + +static const u8 il4965_bss_ac_to_queue[] = { + 0, 1, 2, 3, +}; + +static int +il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct il_priv *il; + struct ieee80211_hw *hw; + struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); + unsigned long flags; + u16 pci_cmd; + + /************************ + * 1. 
Allocating HW data + ************************/ + + hw = il_alloc_all(cfg); + if (!hw) { + err = -ENOMEM; + goto out; + } + il = hw->priv; + /* At this point both hw and il are allocated. */ + + il->ctx.ctxid = 0; + + il->ctx.always_active = true; + il->ctx.is_active = true; + il->ctx.rxon_cmd = C_RXON; + il->ctx.rxon_timing_cmd = C_RXON_TIMING; + il->ctx.rxon_assoc_cmd = C_RXON_ASSOC; + il->ctx.qos_cmd = C_QOS_PARAM; + il->ctx.ap_sta_id = IL_AP_ID; + il->ctx.wep_key_cmd = C_WEPKEY; + il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo; + il->ctx.ac_to_queue = il4965_bss_ac_to_queue; + il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC); + il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION); + il->ctx.ap_devtype = RXON_DEV_TYPE_AP; + il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS; + il->ctx.station_devtype = RXON_DEV_TYPE_ESS; + il->ctx.unused_devtype = RXON_DEV_TYPE_ESS; + + SET_IEEE80211_DEV(hw, &pdev->dev); + + D_INFO("*** LOAD DRIVER ***\n"); + il->cfg = cfg; + il->pci_dev = pdev; + il->inta_mask = CSR_INI_SET_MASK; + + if (il_alloc_traffic_mem(il)) + IL_ERR("Not enough memory to generate traffic log\n"); + + /************************** + * 2. Initializing PCI bus + **************************/ + pci_disable_link_state(pdev, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + IL_WARN("No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + pci_set_drvdata(pdev, il); + + /*********************** + * 3. Read REV register + ***********************/ + il->hw_base = pci_iomap(pdev, 0, 0); + if (!il->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + D_INFO("pci_resource_len = 0x%08llx\n", + (unsigned long long)pci_resource_len(pdev, 0)); + D_INFO("pci_resource_base = %p\n", il->hw_base); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&il->reg_lock); + spin_lock_init(&il->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + il4965_hw_detect(il); + IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + il4965_prepare_card_hw(il); + if (!il->hw_ready) { + IL_WARN("Failed, HW not ready\n"); + goto out_iounmap; + } + + /***************** + * 4. 
Read EEPROM + *****************/ + /* Read the EEPROM */ + err = il_eeprom_init(il); + if (err) { + IL_ERR("Unable to init EEPROM\n"); + goto out_iounmap; + } + err = il4965_eeprom_check_version(il); + if (err) + goto out_free_eeprom; + + /* extract MAC Address */ + il4965_eeprom_get_mac(il, il->addresses[0].addr); + D_INFO("MAC address: %pM\n", il->addresses[0].addr); + il->hw->wiphy->addresses = il->addresses; + il->hw->wiphy->n_addresses = 1; + + /************************ + * 5. Setup HW constants + ************************/ + if (il4965_set_hw_params(il)) { + IL_ERR("failed to set hw parameters\n"); + goto out_free_eeprom; + } + + /******************* + * 6. Setup il + *******************/ + + err = il4965_init_drv(il); + if (err) + goto out_free_eeprom; + /* At this point both hw and il are initialized. */ + + /******************** + * 7. Setup services + ********************/ + spin_lock_irqsave(&il->lock, flags); + il_disable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + + pci_enable_msi(il->pci_dev); + + err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); + if (err) { + IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); + goto out_disable_msi; + } + + il4965_setup_deferred_work(il); + il4965_setup_handlers(il); + + /********************************************* + * 8. Enable interrupts and read RFKILL state + *********************************************/ + + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd); + } + + il_enable_rfkill_int(il); + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(S_RF_KILL_HW, &il->status); + else + set_bit(S_RF_KILL_HW, &il->status); + + wiphy_rfkill_set_hw_state(il->hw->wiphy, + test_bit(S_RF_KILL_HW, &il->status)); + + il_power_initialize(il); + + init_completion(&il->_4965.firmware_loading_complete); + + err = il4965_request_firmware(il, true); + if (err) + goto out_destroy_workqueue; + + return 0; + +out_destroy_workqueue: + destroy_workqueue(il->workqueue); + il->workqueue = NULL; + free_irq(il->pci_dev->irq, il); +out_disable_msi: + pci_disable_msi(il->pci_dev); + il4965_uninit_drv(il); +out_free_eeprom: + il_eeprom_free(il); +out_iounmap: + pci_iounmap(pdev, il->hw_base); +out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); +out_pci_disable_device: + pci_disable_device(pdev); +out_ieee80211_free_hw: + il_free_traffic_mem(il); + ieee80211_free_hw(il->hw); +out: + return err; +} + +static void __devexit +il4965_pci_remove(struct pci_dev *pdev) +{ + struct il_priv *il = pci_get_drvdata(pdev); + unsigned long flags; + + if (!il) + return; + + wait_for_completion(&il->_4965.firmware_loading_complete); + + D_INFO("*** UNLOAD DRIVER ***\n"); + + il_dbgfs_unregister(il); + sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group); + + /* ieee80211_unregister_hw will cause il_mac_stop and then il4965_down + * to be called; since we are removing the device we need to set the + * S_EXIT_PENDING bit. + */ + set_bit(S_EXIT_PENDING, &il->status); + + il_leds_exit(il); + + if (il->mac80211_registered) { + ieee80211_unregister_hw(il->hw); + il->mac80211_registered = 0; + } else { + il4965_down(il); + } + + /* + * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il4965_down(), but there are paths to + * run il4965_down() without calling apm_ops.stop(), and there are + * paths to avoid running il4965_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + il_apm_stop(il); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&il->lock, flags); + il_disable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + + il4965_synchronize_irq(il); + + il4965_dealloc_ucode_pci(il); + + if (il->rxq.bd) + il4965_rx_queue_free(il, &il->rxq); + il4965_hw_txq_ctx_free(il); + + il_eeprom_free(il); + + /*netif_stop_queue(dev); */ + flush_workqueue(il->workqueue); + + /* ieee80211_unregister_hw calls il_mac_stop, which flushes + * il->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(il->workqueue); + il->workqueue = NULL; + il_free_traffic_mem(il); + + free_irq(il->pci_dev->irq, il); + pci_disable_msi(il->pci_dev); + pci_iounmap(pdev, il->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + il4965_uninit_drv(il); + + dev_kfree_skb(il->beacon_skb); + + ieee80211_free_hw(il->hw); +} + +/* + * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask + * must be called under il->lock and mac access + */ +void +il4965_txq_set_sched(struct il_priv *il, u32 mask) +{ + il_wr_prph(il, IL49_SCD_TXFACT, mask); +} + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = { + {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)}, + {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)}, + {0} +}; +MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids); + +static struct pci_driver il4965_driver = { + .name = DRV_NAME, + .id_table = il4965_hw_card_ids, + .probe = il4965_pci_probe, + .remove = __devexit_p(il4965_pci_remove), + .driver.pm = IL_LEGACY_PM_OPS, +}; + +static int __init +il4965_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = il4965_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&il4965_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + il4965_rate_control_unregister(); + return ret; +} + +static void __exit +il4965_exit(void) +{ + pci_unregister_driver(&il4965_driver); + il4965_rate_control_unregister(); +} + +module_exit(il4965_exit); +module_init(il4965_init); + +#ifdef CONFIG_IWLEGACY_DEBUG +module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + +module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO); +MODULE_PARM_DESC(queues_num, "number of hw queues."); +module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); +module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, + S_IRUGO); 
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c new file mode 100644 index 00000000000..467d0cb14ec --- /dev/null +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -0,0 +1,2860 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <net/mac80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include "common.h" +#include "4965.h" + +#define IL4965_RS_NAME "iwl-4965-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IL_NUMBER_TRY 1 +#define IL_HT_NUMBER_TRY 3 + +#define RATE_MAX_WINDOW 62 /* # tx in history win */ +#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + RATE_6M_IDX, RATE_6M_IDX, + RATE_6M_IDX, RATE_6M_IDX, + RATE_6M_IDX, + RATE_6M_IDX, RATE_9M_IDX, + RATE_12M_IDX, RATE_18M_IDX, + RATE_24M_IDX, RATE_36M_IDX, + RATE_48M_IDX, RATE_54M_IDX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \ + RATE_SISO_##s##M_PLCP, \ + RATE_MIMO2_##s##M_PLCP,\ + RATE_##r##M_IEEE, \ + RATE_##ip##M_IDX, \ + RATE_##in##M_IDX, \ + RATE_##rp##M_IDX, \ + RATE_##rn##M_IDX, \ + RATE_##pp##M_IDX, \ + RATE_##np##M_IDX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to RATE_INVALID + * + */ +const struct il_rate_info il_rates[RATE_COUNT] = { + IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + 
IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ +}; + +static int +il4965_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + + if (idx >= RATE_MIMO2_6M_PLCP) + idx = idx - RATE_MIMO2_6M_PLCP; + + idx += IL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht */ + if (idx >= RATE_9M_IDX) + idx += 1; + if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE) + return idx; + + /* legacy rate format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++) + if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx; + } + + return -1; +} + +static void il4965_rs_rate_scale_perform(struct il_priv *il, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct il_lq_sta *lq_sta); +static void il4965_rs_fill_link_cmd(struct il_priv *il, + struct il_lq_sta *lq_sta, u32 rate_n_flags); +static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, + bool force_search); + +#ifdef CONFIG_MAC80211_DEBUGFS +static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, + u32 *rate_n_flags, int idx); +#else +static void +il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) +{ +} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. 
+ */ + +static s32 expected_tpt_legacy[RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ + {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ +}; + +static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ + {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ + {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ + {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ + {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ +}; + +/* mbps, mcs */ +static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = { + {"1", "BPSK DSSS"}, + {"2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + {"11", "QPSK CCK"}, + {"6", "BPSK 1/2"}, + {"9", "BPSK 1/2"}, + {"12", "QPSK 1/2"}, + {"18", "QPSK 3/4"}, + {"24", "16QAM 1/2"}, + {"36", "16QAM 3/4"}, + {"48", "64QAM 2/3"}, + {"54", "64QAM 3/4"}, + {"60", "64QAM 5/6"}, +}; + +#define MCS_IDX_PER_STREAM (8) + +static inline u8 +il4965_rs_extract_rate(u32 rate_n_flags) +{ + return (u8) (rate_n_flags & 0xFF); +} + +static void +il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win) +{ + win->data = 0; + win->success_counter = 0; + win->success_ratio = IL_INVALID_VALUE; + win->counter = 0; + win->average_tpt = IL_INVALID_VALUE; + win->stamp = 0; +} + +static inline u8 +il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the stats. All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. 
+ */ +static void +il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && tl->time_stamp < oldest_time) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 +il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 idx; + struct il_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else + return MAX_TID_COUNT; + + if (unlikely(tid >= TID_MAX_LOAD_COUNT)) + return MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + idx = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (idx >= TID_QUEUE_MAX_SIZE) + il4965_rs_tl_rm_old_stats(tl, curr_time); + + idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE; + tl->packet_count[idx] = tl->packet_count[idx] + 1; + tl->total = tl->total + 1; + + if ((idx + 1) > tl->queue_count) + tl->queue_count = idx + 1; + + return tid; +} + +/* + get the traffic load value for tid +*/ +static u32 +il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 idx; + struct il_traffic_load *tl = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + idx = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (idx >= TID_QUEUE_MAX_SIZE) + il4965_rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static int +il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data, + u8 tid, struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + u32 load; + + load = il4965_rs_tl_get_load(lq_data, tid); + + if (load > IL_AGG_LOAD_THRESHOLD) { + D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IL_ERR("Fail start Tx agg on tid: %d\n", tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } else + D_HT("Aggregation not enabled for tid %d because load = %u\n", + tid, load); + + return ret; +} + +static void +il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < TID_MAX_LOAD_COUNT) + il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta); + else + IL_ERR("tid exceeds max load count: %d/%d\n", tid, + TID_MAX_LOAD_COUNT); +} + +static inline 
int +il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an il_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 +il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_idx]; + return 0; +} + +/** + * il4965_rs_collect_tx_data - Update the success/failure sliding win + * + * We keep a sliding win of the last 62 packets transmitted + * at this rate. win->data contains the bitmask of successful + * packets. + */ +static int +il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx, + int attempts, int successes) +{ + struct il_rate_scale_data *win = NULL; + static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + if (scale_idx < 0 || scale_idx >= RATE_COUNT) + return -EINVAL; + + /* Select win for current tx bit rate */ + win = &(tbl->win[scale_idx]); + + /* Get expected throughput */ + tpt = il4965_get_expected_tpt(tbl, scale_idx); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history win; anything older isn't really relevant any more. + * If we have filled up the sliding win, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (win->counter >= RATE_MAX_WINDOW) { + + /* remove earliest */ + win->counter = RATE_MAX_WINDOW - 1; + + if (win->data & mask) { + win->data &= ~mask; + win->success_counter--; + } + } + + /* Increment frames-attempted counter */ + win->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + win->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + win->success_counter++; + win->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (win->counter > 0) + win->success_ratio = + 128 * (100 * win->success_counter) / win->counter; + else + win->success_ratio = IL_INVALID_VALUE; + + fail_count = win->counter - win->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if (fail_count >= RATE_MIN_FAILURE_TH || + win->success_counter >= RATE_MIN_SUCCESS_TH) + win->average_tpt = (win->success_ratio * tpt + 64) / 128; + else + win->average_tpt = IL_INVALID_VALUE; + + /* Tag this win as having been updated */ + win->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
+ */ +static u32 +il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl, + int idx, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = il_rates[idx].plcp; + if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_Ht(tbl->lq_type)) { + if (idx > IL_LAST_OFDM_RATE) { + IL_ERR("Invalid HT rate idx %d\n", idx); + idx = IL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= il_rates[idx].plcp_siso; + else + rate_n_flags |= il_rates[idx].plcp_mimo2; + } else { + IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= + ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) { + if (tbl->is_dup) + rate_n_flags |= RATE_MCS_DUP_MSK; + else + rate_n_flags |= RATE_MCS_HT40_MSK; + } + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IL_ERR("GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int +il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct il_scale_tbl_info *tbl, int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 il4965_num_of_ant = + il4965_get_il4965_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + memset(tbl, 0, sizeof(struct il_scale_tbl_info)); + *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->is_dup = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (il4965_num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((rate_n_flags & RATE_MCS_HT40_MSK) || + (rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_ht40 = 1; + + if (rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + mcs = il4965_rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= RATE_SISO_60M_PLCP) { + if (il4965_num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE */ + /* MIMO2 */ + } else { + if (il4965_num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int +il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct il_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type)) + return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while (new_ant_type != tbl->ant_type && + !il4965_rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; + return 1; +} + +/** + * Green-field mode is valid if the station supports it and + * there are no 
non-GF stations present in the BSS. + */ +static bool +il4965_rs_use_green(struct ieee80211_sta *sta) +{ + struct il_station_priv *sta_priv = (void *)sta->drv_priv; + struct il_rxon_context *ctx = sta_priv->common.ctx; + + return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ctx->ht.non_gf_sta_present); +} + +/** + * il4965_rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 +il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum il_table_type rate_type) +{ + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else + return lq_sta->active_mimo2_rate; + } +} + +static u16 +il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask, + int rate_type) +{ + u8 high = RATE_INVALID; + u8 low = RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = idx - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = idx + 1; + for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = idx; + while (low != RATE_INVALID) { + low = il_rates[low].prev_rs; + if (low == RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + D_RATE("Skipping masked lower rate: %d\n", low); + } + + high = idx; + while (high != RATE_INVALID) { + high = il_rates[high].next_rs; + if (high == RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + D_RATE("Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 +il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta, + struct il_scale_tbl_info *tbl, u8 scale_idx, + u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct il_priv *il = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) { + switch_to_legacy = 1; + scale_idx = rs_ht_to_legacy[scale_idx]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (il4965_num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + il4965_first_antenna(il->hw_params.valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IL_MAX_SEARCH; + } + + rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = + (u16) (rate_mask & + (lq_sta->supp_rates << IL_FIRST_OFDM_RATE)); + else + rate_mask = (u16) (rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_idx))) { + low = scale_idx; + goto out; + } + + high_low = + il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == RATE_INVALID) + low = scale_idx; + +out: + return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool +il4965_table_type_matches(struct il_scale_tbl_info *a, + struct il_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type && a->ant_type == b->ant_type && + a->is_SGI == b->is_SGI); +} + +/* + * mac80211 sends us Tx status + */ +static void +il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *il_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_idx, mac_idx, i; + struct il_lq_sta *lq_sta = il_sta; + struct il_link_quality_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct il_priv *il = (struct il_priv *)il_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct il_scale_tbl_info tbl_type; + struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + struct il_station_priv *sta_priv = (void *)sta->drv_priv; + struct il_rxon_context *ctx = sta_priv->common.ctx; + + D_RATE("get frame ack response, update rate scale win\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!lq_sta) { + D_RATE("Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->drv) { + D_RATE("Rate scaling not initialized yet.\n"); + return; + } + + if (!ieee80211_is_data(hdr->frame_control) || + (info->flags & IEEE80211_TX_CTL_NO_ACK)) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
+ */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx); + if (il->band == IEEE80211_BAND_5GHZ) + rs_idx -= IL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_idx = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */ + if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE)) + mac_idx++; + /* + * mac80211 HT idx is always zero-idxed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (il->band == IEEE80211_BAND_2GHZ) + mac_idx += IL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if (mac_idx < 0 || + tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) || + tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) || + tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) || + tbl_type.ant_type != info->antenna_sel_tx || + !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS) + || !!(tx_rate & RATE_MCS_GF_MSK) != + !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) { + D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx, + rs_idx, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (il4965_table_type_matches + (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else + if (il4965_table_type_matches + (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + D_RATE("Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type, + tmp_tbl->ant_type, tmp_tbl->is_SGI); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type, + tmp_tbl->ant_type, tmp_tbl->is_SGI); + D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type, + tbl_type.ant_type, tbl_type.is_SGI); + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + il4965_rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first idx into rate scale table. 
+ */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, + &rs_idx); + il4965_rs_collect_tx_data(curr_tbl, rs_idx, + info->status.ampdu_len, + info->status.ampdu_ack_len); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += + (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. + */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); + il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, + &tbl_type, &rs_idx); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (il4965_table_type_matches(&tbl_type, curr_tbl)) + tmp_tbl = curr_tbl; + else if (il4965_table_type_matches + (&tbl_type, other_tbl)) + tmp_tbl = other_tbl; + else + continue; + il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1, + i < + retries ? 0 : legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; +done: + /* See if there's a better rate or modulation mode to try. */ + if (sta->supp_rates[sband->band]) + il4965_rs_rate_scale_perform(il, skb, sta, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history stats. + * These control how long we stay using same modulation mode before + * searching for a new mode. 
+ */ +static void +il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy, + struct il_lq_sta *lq_sta) +{ + D_RATE("we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT; + lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT; + lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void +il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta, + struct il_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + s32(*ht_tbl_pointer)[RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else /* if (is_mimo2(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. + * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. 
+ */ +static s32 +il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta, + struct il_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 idx) +{ + /* "active" values */ + struct il_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[idx].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[idx]; + + /* expected "search" throughput */ + s32 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = idx; + + new_rate = high = low = start_hi = RATE_INVALID; + + for (;;) { + high_low = + il4965_rs_get_adjacent_rate(il, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((100 * tpt_tbl[rate] > lq_sta->last_tpt && + (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH + && tpt_tbl[rate] <= active_tpt)) || + (active_sr >= RATE_SCALE_SWITCH && + tpt_tbl[rate] > active_tpt)) { + + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +/* + * Set up search table for MIMO2 + */ +static int +il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct il_scale_tbl_info *tbl, int idx) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + struct il_station_priv *sta_priv = (void *)sta->drv_priv; + struct il_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) == + WLAN_HT_CAP_SM_PS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (il->hw_params.tx_chains_num < 2) + return -1; + + D_RATE("LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + il4965_rs_set_expected_tpt_table(lq_sta, tbl); + + rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx); + + D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask); + if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) { + D_RATE("Can't switch with idx %d rate mask %x\n", rate, + rate_mask); + return -1; + } + tbl->current_rate = + il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green); + + D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate, + is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int +il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta, + struct ieee80211_conf *conf, struct ieee80211_sta *sta, + struct il_scale_tbl_info *tbl, int idx) +{ + u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + struct il_station_priv *sta_priv = (void *)sta->drv_priv; + struct il_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + D_RATE("LQ: try to switch to SISO\n"); + + tbl->is_dup = lq_sta->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */ + + il4965_rs_set_expected_tpt_table(lq_sta, tbl); + rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx); + + D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask); + if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) { + D_RATE("can not switch with idx %d rate mask %x\n", rate, + rate_mask); + return -1; + } + tbl->current_rate = + il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green); + D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate, + is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static int +il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int idx) +{ + struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct il_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct 
il_rate_scale_data *win = &(tbl->win[idx]); + u32 sz = + (sizeof(struct il_scale_tbl_info) - + (sizeof(struct il_rate_scale_data) * RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = il->hw_params.valid_tx_ant; + u8 tx_chains_num = il->hw_params.tx_chains_num; + int ret = 0; + u8 update_search_tbl_counter = 0; + + tbl->action = IL_LEGACY_SWITCH_SISO; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IL_LEGACY_SWITCH_ANTENNA1: + case IL_LEGACY_SWITCH_ANTENNA2: + D_RATE("LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if success has been great */ + if (win->success_ratio >= IL_RS_GOOD_RATIO) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (il4965_rs_toggle_antenna + (valid_tx_ant, &search_tbl->current_rate, + search_tbl)) { + update_search_tbl_counter = 1; + il4965_rs_set_expected_tpt_table(lq_sta, + search_tbl); + goto out; + } + break; + case IL_LEGACY_SWITCH_SISO: + D_RATE("LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = + il4965_rs_switch_to_siso(il, lq_sta, conf, sta, + search_tbl, idx); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IL_LEGACY_SWITCH_MIMO2_AB: + case IL_LEGACY_SWITCH_MIMO2_AC: + case IL_LEGACY_SWITCH_MIMO2_BC: + D_RATE("LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!il4965_rs_is_valid_ant + (valid_tx_ant, search_tbl->ant_type)) + break; + + ret = + il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta, + search_tbl, idx); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + return 0; + +} + +/* + * Try to switch to new modulation mode from SISO + */ +static int +il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int idx) +{ + u8 is_green = lq_sta->is_green; + struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct il_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct il_rate_scale_data *win = &(tbl->win[idx]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = + (sizeof(struct il_scale_tbl_info) - + (sizeof(struct il_rate_scale_data) * RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = il->hw_params.valid_tx_ant; + u8 tx_chains_num = il->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IL_SISO_SWITCH_ANTENNA1: + case IL_SISO_SWITCH_ANTENNA2: + D_RATE("LQ: SISO toggle 
Antenna\n"); + if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (win->success_ratio >= IL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (il4965_rs_toggle_antenna + (valid_tx_ant, &search_tbl->current_rate, + search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IL_SISO_SWITCH_MIMO2_AB: + case IL_SISO_SWITCH_MIMO2_AC: + case IL_SISO_SWITCH_MIMO2_BC: + D_RATE("LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!il4965_rs_is_valid_ant + (valid_tx_ant, search_tbl->ant_type)) + break; + + ret = + il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta, + search_tbl, idx); + if (!ret) + goto out; + break; + case IL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && + !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && + !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40)) + break; + + D_RATE("LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IL_ERR("SGI was set in GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + il4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[idx]) + break; + } + search_tbl->current_rate = + il4965_rate_n_flags_from_tbl(il, search_tbl, idx, + is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IL_SISO_SWITCH_GI) + tbl->action = IL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IL_SISO_SWITCH_GI) + tbl->action = IL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static int +il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int idx) +{ + s8 is_green = lq_sta->is_green; + struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct il_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct il_rate_scale_data *win = &(tbl->win[idx]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = + (sizeof(struct il_scale_tbl_info) - + (sizeof(struct il_rate_scale_data) * RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = il->hw_params.valid_tx_ant; + u8 tx_chains_num = il->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IL_MIMO2_SWITCH_ANTENNA1: + case IL_MIMO2_SWITCH_ANTENNA2: + D_RATE("LQ: MIMO2 toggle Antennas\n"); + + if (tx_chains_num <= 2) + break; + + if (win->success_ratio >= IL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (il4965_rs_toggle_antenna + (valid_tx_ant, &search_tbl->current_rate, + search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IL_MIMO2_SWITCH_SISO_A: + case IL_MIMO2_SWITCH_SISO_B: + case IL_MIMO2_SWITCH_SISO_C: + D_RATE("LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for 
SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!il4965_rs_is_valid_ant + (valid_tx_ant, search_tbl->ant_type)) + break; + + ret = + il4965_rs_switch_to_siso(il, lq_sta, conf, sta, + search_tbl, idx); + if (!ret) + goto out; + + break; + + case IL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && + !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && + !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40)) + break; + + D_RATE("LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + il4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[idx]) + break; + } + search_tbl->current_rate = + il4965_rate_n_flags_from_tbl(il, search_tbl, idx, + is_green); + update_search_tbl_counter = 1; + goto out; + + } + tbl->action++; + if (tbl->action > IL_MIMO2_SWITCH_GI) + tbl->action = IL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IL_MIMO2_SWITCH_GI) + tbl->action = IL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; + +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void +il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search) +{ + struct il_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct il_priv *il; + + il = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. 
+ */ + if (force_search || + lq_sta->total_failed > lq_sta->max_failure_limit || + lq_sta->total_success > lq_sta->max_success_limit || + (!lq_sta->search_better_tbl && lq_sta->flush_timer && + flush_interval_passed)) { + D_RATE("LQ: stay is expired %d %d %d\n:", + lq_sta->total_failed, lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + D_RATE("LQ: stay in table clear win\n"); + for (i = 0; i < RATE_COUNT; i++) + il4965_rs_rate_scale_clear_win(& + (tbl-> + win + [i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < RATE_COUNT; i++) + il4965_rs_rate_scale_clear_win(&(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + */ +static void +il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx, + struct il_lq_sta *lq_sta, + struct il_scale_tbl_info *tbl, int idx, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green); + il4965_rs_fill_link_cmd(il, lq_sta, rate); + il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false); +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void +il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb, + struct ieee80211_sta *sta, + struct il_lq_sta *lq_sta) +{ + struct ieee80211_hw *hw = il->hw; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = RATE_INVALID; + int high = RATE_INVALID; + int idx; + int i; + struct il_rate_scale_data *win = NULL; + int current_tpt = IL_INVALID_VALUE; + int low_tpt = IL_INVALID_VALUE; + int high_tpt = IL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct il_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_idx_msk = 0; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = MAX_TID_COUNT; + struct il_tid_data *tid_data; + struct il_station_priv *sta_priv = (void *)sta->drv_priv; + struct il_rxon_context *ctx = sta_priv->common.ctx; + + D_RATE("rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. 
*/ + if (!ieee80211_is_data(hdr->frame_control) || + (info->flags & IEEE80211_TX_CTL_NO_ACK)) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = il4965_rs_tl_add_packet(lq_sta, hdr); + if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else + lq_sta->is_agg = 0; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = il4965_rs_use_green(sta); + is_green = lq_sta->is_green; + + /* current tx rate */ + idx = lq_sta->last_txrate_idx; + + D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + D_RATE("mask 0x%04X\n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_idx_msk = + (u16) (rate_mask & + (lq_sta->supp_rates << IL_FIRST_OFDM_RATE)); + else + rate_scale_idx_msk = + (u16) (rate_mask & lq_sta->supp_rates); + + } else + rate_scale_idx_msk = rate_mask; + + if (!rate_scale_idx_msk) + rate_scale_idx_msk = rate_mask; + + if (!((1 << idx) & rate_scale_idx_msk)) { + IL_ERR("Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid */ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + idx = il4965_hwrate_to_plcp_idx(tbl->current_rate); + il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx, + is_green); + } + return; + } + + /* Get expected throughput table and history win for current rate */ + if (!tbl->expected_tpt) { + IL_ERR("tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) { + idx = lq_sta->max_rate_idx; + update_lq = 1; + win = &(tbl->win[idx]); + goto lq_update; + } + + win = &(tbl->win[idx]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = win->counter - win->success_counter; + if (fail_count < RATE_MIN_FAILURE_TH && + win->success_counter < RATE_MIN_SUCCESS_TH) { + D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n", + win->success_counter, win->counter, idx); + + /* Can't calculate this yet; not enough history */ + win->average_tpt = IL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? 
*/ + il4965_rs_stay_in_table(lq_sta, false); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + if (win->average_tpt != + ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) { + IL_ERR("expected_tpt should have been calculated by now\n"); + win->average_tpt = + ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128); + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (win->average_tpt > lq_sta->last_tpt) { + + D_RATE("LQ: SWITCHING TO NEW TBL " + "suc=%d cur-tpt=%d old-tpt=%d\n", + win->success_ratio, win->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = win->average_tpt; + + /* Else poor success; go back to mode in "active" table */ + } else { + + D_RATE("LQ: GOING BACK TO THE OLD TBL " + "suc=%d cur-tpt=%d old-tpt=%d\n", + win->success_ratio, win->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + idx = il4965_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = + il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high) + high = RATE_INVALID; + + sr = win->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = win->average_tpt; + if (low != RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if (sr <= RATE_DECREASE_TH || current_tpt == 0) { + D_RATE("decrease rate because of low success_ratio\n"); + scale_action = -1; + + /* No throughput measured yet for adjacent rates; try increase. */ + } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) { + + if (high != RATE_INVALID && sr >= RATE_INCREASE_TH) + scale_action = 1; + else if (low != RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! */ + else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE && + low_tpt < current_tpt && high_tpt < current_tpt) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. 
*/ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH) + scale_action = 1; + else + scale_action = 0; + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + D_RATE("decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. */ + if (scale_action == -1 && low != RATE_INVALID && + (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low])) + scale_action = 0; + + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != RATE_INVALID) { + update_lq = 1; + idx = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != RATE_INVALID) { + update_lq = 1; + idx = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n", + idx, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx, + is_green); + + /* Should we stay with this modulation mode, + * or search for a new one? */ + il4965_rs_stay_in_table(lq_sta, false); + + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) { + /* Save current throughput to compare with "search" throughput */ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. */ + if (is_legacy(tbl->lq_type)) + il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx); + else if (is_siso(tbl->lq_type)) + il4965_rs_move_siso_to_other(il, lq_sta, conf, sta, + idx); + else /* (is_mimo2(tbl->lq_type)) */ + il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta, + idx); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. */ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < RATE_COUNT; i++) + il4965_rs_rate_scale_clear_win(&(tbl->win[i])); + + /* Use new "search" start rate */ + idx = il4965_hwrate_to_plcp_idx(tbl->current_rate); + + D_RATE("Switch current mcs: %X idx: %d\n", + tbl->current_rate, idx); + il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate); + il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false); + } else + done_search = 1; + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. 
*/ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && + lq_sta->action_counter > tbl1->max_search) { + D_RATE("LQ: STAY in legacy table\n"); + il4965_rs_set_stay_in_table(il, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if (lq_sta->enable_counter && + lq_sta->action_counter >= tbl1->max_search) { + if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + tid != MAX_TID_COUNT) { + tid_data = + &il->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IL_AGG_OFF) { + D_RATE("try to aggregate tid %d\n", + tid); + il4965_rs_tl_turn_on_agg(il, tid, + lq_sta, sta); + } + } + il4965_rs_set_stay_in_table(il, 0, lq_sta); + } + } + +out: + tbl->current_rate = + il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green); + i = idx; + lq_sta->last_txrate_idx = i; +} + +/** + * il4965_rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run C_ADD_STA command to set up station table entry, before + * calling this function (which runs C_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). + */ +static void +il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, + struct ieee80211_sta *sta, struct il_lq_sta *lq_sta) +{ + struct il_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = il4965_rs_use_green(sta); + u8 active_tbl = 0; + u8 valid_tx_ant; + struct il_station_priv *sta_priv; + struct il_rxon_context *ctx; + + if (!sta || !lq_sta) + return; + + sta_priv = (void *)sta->drv_priv; + ctx = sta_priv->common.ctx; + + i = lq_sta->last_txrate_idx; + + valid_tx_ant = il->hw_params.valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if (i < 0 || i >= RATE_COUNT) + i = 0; + + rate = il_rates[i].plcp; + tbl->ant_type = il4965_first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx); + if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green); + tbl->current_rate = rate; + il4965_rs_set_expected_tpt_table(lq_sta, tbl); + il4965_rs_fill_link_cmd(NULL, lq_sta, rate); + il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; + il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true); +} + +static void +il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, + struct ieee80211_tx_rate_control *txrc) +{ + + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct il_priv *il __maybe_unused = (struct il_priv *)il_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct il_lq_sta *lq_sta = il_sta; + int rate_idx; + + D_RATE("rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + 
lq_sta->max_rate_idx = txrc->max_rate_idx; + if (sband->band == IEEE80211_BAND_5GHZ && + lq_sta->max_rate_idx != -1) + lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE; + if (lq_sta->max_rate_idx < 0 || + lq_sta->max_rate_idx >= RATE_COUNT) + lq_sta->max_rate_idx = -1; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (lq_sta && !lq_sta->drv) { + D_RATE("Rate scaling not initialized yet.\n"); + il_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, il_sta, txrc)) + return; + + if (!lq_sta) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS idx */ + rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0; + if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >= + RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_IDX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_DUP_DATA; + if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY || + (sband->band == IEEE80211_BAND_5GHZ && + rate_idx < IL_FIRST_OFDM_RATE)) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust idx */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; + +} + +static void * +il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct il_station_priv *sta_priv = + (struct il_station_priv *)sta->drv_priv; + struct il_priv *il; + + il = (struct il_priv *)il_rate; + D_RATE("create station rate scale win\n"); + + return &sta_priv->lq_sta; +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void +il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) +{ + int i, j; + struct ieee80211_hw *hw = il->hw; + struct ieee80211_conf *conf = &il->hw->conf; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct il_station_priv *sta_priv; + struct il_lq_sta *lq_sta; + struct ieee80211_supported_band *sband; + + sta_priv = (struct il_station_priv *)sta->drv_priv; + lq_sta = &sta_priv->lq_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + lq_sta->lq.sta_id = sta_id; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < RATE_COUNT; i++) + il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j]. + win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < RATE_COUNT; i++) + il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j]. + win[i]); + + D_RATE("LQ:" "*** rate scale station global init for station %d ***\n", + sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
*/ + + lq_sta->is_dup = 0; + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX; + lq_sta->is_green = il4965_rs_use_green(sta); + lq_sta->active_legacy_rate = il->active_rate & ~(0x1000); + lq_sta->band = il->band; + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. + */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16) 0x2); + lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16) 0x2); + lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE; + + /* These values will be overridden later */ + lq_sta->lq.general_params.single_stream_ant_msk = + il4965_first_antenna(il->hw_params.valid_tx_ant); + lq_sta->lq.general_params.dual_stream_ant_msk = + il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params. + valid_tx_ant); + if (!lq_sta->lq.general_params.dual_stream_ant_msk) { + lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; + } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) { + lq_sta->lq.general_params.dual_stream_ant_msk = + il->hw_params.valid_tx_ant; + } + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID; + lq_sta->drv = il; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; + +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->dbg_fixed_rate = 0; +#endif + + il4965_rs_initialize_lq(il, conf, sta, lq_sta); +} + +static void +il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, + u32 new_rate) +{ + struct il_scale_tbl_info tbl_type; + int idx = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct il_link_quality_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (idx 0) if needed for debug purposes */ + il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx); + + /* Interpret new_rate (rate_n_flags) */ + il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, + &rate_idx); + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IL_NUMBER_TRY; + } else { + repeat_rate = IL_HT_NUMBER_TRY; + } + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 1 : 0; + + /* Fill 1st table entry (idx 0) */ + lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate); + + if (il4965_num_of_ant(tbl_type.ant_type) == 1) { + lq_cmd->general_params.single_stream_ant_msk = + tbl_type.ant_type; + } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) { + lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type; + } + /* otherwise we don't modify the existing value */ + idx++; + repeat_rate--; + if (il) + valid_tx_ant = il->hw_params.valid_tx_ant; + + /* Fill rest of rate table */ + while (idx < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IL_NUMBER_TRY == 1, this loop will not execute. + * For HT IL_HT_NUMBER_TRY == 3, this executes twice. 
*/ + while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (il && + il4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, + &tbl_type)) + ant_toggle_cnt = 1; + } + + /* Override next rate if needed for debug purposes */ + il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx); + + /* Fill next table entry */ + lq_cmd->rs_table[idx].rate_n_flags = + cpu_to_le32(new_rate); + repeat_rate--; + idx++; + } + + il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = idx; + + /* Get next rate */ + new_rate = + il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (il && + il4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IL_NUMBER_TRY; + } else { + repeat_rate = IL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx); + + /* Fill next table entry */ + lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate); + + idx++; + repeat_rate--; + } + + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); +} + +static void * +il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} + +/* rate scale requires free function to be implemented */ +static void +il4965_rs_free(void *il_rate) +{ + return; +} + +static void +il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta) +{ + struct il_priv *il __maybe_unused = il_r; + + D_RATE("enter\n"); + D_RATE("leave\n"); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static int +il4965_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static void +il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) +{ + struct il_priv *il; + u8 valid_tx_ant; + u8 ant_sel_tx; + + il = lq_sta->drv; + valid_tx_ant = il->hw_params.valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta-> + dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> + RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + D_RATE("Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IL_ERR + ("Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + D_RATE("Fixed rate OFF\n"); + } + } else { + D_RATE("Fixed rate OFF\n"); + } +} + +static ssize_t +il4965_rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_lq_sta *lq_sta = file->private_data; + struct il_priv *il; + char buf[64]; + size_t buf_size; + u32 parsed_rate; + struct il_station_priv *sta_priv = + container_of(lq_sta, struct il_station_priv, lq_sta); 
+ struct il_rxon_context *ctx = sta_priv->common.ctx; + + il = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id, + lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false); + } + + return count; +} + +static ssize_t +il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int idx = 0; + ssize_t ret; + + struct il_lq_sta *lq_sta = file->private_data; + struct il_priv *il; + struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + il = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += + sprintf(buff + desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += + sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate); + desc += + sprintf(buff + desc, "valid_tx_ant %s%s%s\n", + (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", + (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", + (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += + sprintf(buff + desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += + sprintf(buff + desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); + desc += + sprintf(buff + desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += + sprintf(buff + desc, " %s %s %s\n", + (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? "AGG on" : ""); + } + desc += + sprintf(buff + desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += + sprintf(buff + desc, + "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.general_params.flags, + lq_sta->lq.general_params.mimo_delimiter, + lq_sta->lq.general_params.single_stream_ant_msk, + lq_sta->lq.general_params.dual_stream_ant_msk); + + desc += + sprintf(buff + desc, + "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), + lq_sta->lq.agg_params.agg_dis_start_th, + lq_sta->lq.agg_params.agg_frame_cnt_limit); + + desc += + sprintf(buff + desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.general_params.start_rate_idx[0], + lq_sta->lq.general_params.start_rate_idx[1], + lq_sta->lq.general_params.start_rate_idx[2], + lq_sta->lq.general_params.start_rate_idx[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + idx = + il4965_hwrate_to_plcp_idx(le32_to_cpu + (lq_sta->lq.rs_table[i]. + rate_n_flags)); + if (is_legacy(tbl->lq_type)) { + desc += + sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i, + le32_to_cpu(lq_sta->lq.rs_table[i]. 
+ rate_n_flags), + il_rate_mcs[idx].mbps); + } else { + desc += + sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i]. + rate_n_flags), + il_rate_mcs[idx].mbps, + il_rate_mcs[idx].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = il4965_rs_sta_dbgfs_scale_table_write, + .read = il4965_rs_sta_dbgfs_scale_table_read, + .open = il4965_open_file_generic, + .llseek = default_llseek, +}; + +static ssize_t +il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct il_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += + sprintf(buff + desc, + "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" + "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->lq_info[i].is_dup, lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < RATE_COUNT; j++) { + desc += + sprintf(buff + desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = il4965_rs_sta_dbgfs_stats_table_read, + .open = il4965_open_file_generic, + .llseek = default_llseek, +}; + +static ssize_t +il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + char buff[120]; + int desc = 0; + struct il_lq_sta *lq_sta = file->private_data; + struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + + if (is_Ht(tbl->lq_type)) + desc += + sprintf(buff + desc, "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += + sprintf(buff + desc, "Bit Rate= %d Mb/s\n", + il_rates[lq_sta->last_txrate_idx].ieee >> 1); + + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = il4965_rs_sta_dbgfs_rate_scale_data_read, + .open = il4965_open_file_generic, + .llseek = default_llseek, +}; + +static void +il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir) +{ + struct il_lq_sta *lq_sta = il_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta, + &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta, + &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + +} + +static void +il4965_rs_remove_debugfs(void *il, void *il_sta) +{ + struct il_lq_sta *lq_sta = il_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + 
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. + */ +static void +il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *il_sta) +{ +} + +static struct rate_control_ops rs_4965_ops = { + .module = NULL, + .name = IL4965_RS_NAME, + .tx_status = il4965_rs_tx_status, + .get_rate = il4965_rs_get_rate, + .rate_init = il4965_rs_rate_init_stub, + .alloc = il4965_rs_alloc, + .free = il4965_rs_free, + .alloc_sta = il4965_rs_alloc_sta, + .free_sta = il4965_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = il4965_rs_add_debugfs, + .remove_sta_debugfs = il4965_rs_remove_debugfs, +#endif +}; + +int +il4965_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_4965_ops); +} + +void +il4965_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_4965_ops); +} diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c new file mode 100644 index 00000000000..cacbc03880b --- /dev/null +++ b/drivers/net/wireless/iwlegacy/4965.c @@ -0,0 +1,2402 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "common.h" +#include "4965.h" + +/** + * il_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. 
+ */ +static int +il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) +{ + u32 val; + int ret = 0; + u32 errcnt = 0; + u32 i; + + D_INFO("ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IL_DL_IO is set */ + il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND); + val = _il_rd(il, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + ret = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return ret; +} + +/** + * il4965_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int +il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) +{ + u32 val; + u32 save_len = len; + int ret = 0; + u32 errcnt; + + D_INFO("ucode inst image size is %u\n", len); + + il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IL_DL_IO is set */ + val = _il_rd(il, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IL_ERR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + ret = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + D_INFO("ucode image in INSTRUCTION memory is good\n"); + + return ret; +} + +/** + * il4965_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +int +il4965_verify_ucode(struct il_priv *il) +{ + __le32 *image; + u32 len; + int ret; + + /* Try bootstrap */ + image = (__le32 *) il->ucode_boot.v_addr; + len = il->ucode_boot.len; + ret = il4965_verify_inst_sparse(il, image, len); + if (!ret) { + D_INFO("Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *) il->ucode_init.v_addr; + len = il->ucode_init.len; + ret = il4965_verify_inst_sparse(il, image, len); + if (!ret) { + D_INFO("Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *) il->ucode_code.v_addr; + len = il->ucode_code.len; + ret = il4965_verify_inst_sparse(il, image, len); + if (!ret) { + D_INFO("Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. */ + image = (__le32 *) il->ucode_boot.v_addr; + len = il->ucode_boot.len; + ret = il4965_verify_inst_full(il, image, len); + + return ret; +} + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. 
+ */ +int +il4965_eeprom_acquire_semaphore(struct il_priv *il) +{ + u16 count; + int ret; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = + _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (ret >= 0) + return ret; + } + + return ret; +} + +void +il4965_eeprom_release_semaphore(struct il_priv *il) +{ + il_clear_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + +} + +int +il4965_eeprom_check_version(struct il_priv *il) +{ + u16 eeprom_ver; + u16 calib_ver; + + eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION); + calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET); + + if (eeprom_ver < il->cfg->eeprom_ver || + calib_ver < il->cfg->eeprom_calib_ver) + goto err; + + IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver); + + return 0; +err: + IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x " + "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver, + calib_ver, il->cfg->eeprom_calib_ver); + return -EINVAL; + +} + +void +il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac) +{ + const u8 *addr = il_eeprom_query_addr(il, + EEPROM_MAC_ADDRESS); + memcpy(mac, addr, ETH_ALEN); +} + +/* Send led command */ +static int +il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd) +{ + struct il_host_cmd cmd = { + .id = C_LEDS, + .len = sizeof(struct il_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + u32 reg; + + reg = _il_rd(il, CSR_LED_REG); + if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) + _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); + + return il_send_cmd(il, &cmd); +} + +/* Set led register off */ +void +il4965_led_enable(struct il_priv *il) +{ + _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON); +} + +const struct il_led_ops il4965_led_ops = { + .cmd = il4965_send_led_cmd, +}; + +static int il4965_send_tx_power(struct il_priv *il); +static int il4965_hw_get_temperature(struct il_priv *il); + +/* Highest firmware API version supported */ +#define IL4965_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IL4965_UCODE_API_MIN 2 + +#define IL4965_FW_PRE "iwlwifi-4965-" +#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode" +#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api) + +/* check contents of special bootstrap uCode SRAM */ +static int +il4965_verify_bsm(struct il_priv *il) +{ + __le32 *image = il->ucode_boot.v_addr; + u32 len = il->ucode_boot.len; + u32 reg; + u32 val; + + D_INFO("Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = il_rd_prph(il, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = il_rd_prph(il, reg); + if (val != le32_to_cpu(*image)) { + IL_ERR("BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND, + len, val, le32_to_cpu(*image)); + return -EIO; + } + } + + D_INFO("BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * il4965_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. 
When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int +il4965_load_bsm(struct il_priv *il) +{ + __le32 *image = il->ucode_boot.v_addr; + u32 len = il->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int i; + u32 done; + u32 reg_offset; + int ret; + + D_INFO("Begin load bsm\n"); + + il->ucode_type = UCODE_RT; + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IL49_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 35:4 for 4965. + * NOTE: il_init_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. + */ + pinst = il->ucode_init.p_addr >> 4; + pdata = il->ucode_init_data.p_addr >> 4; + inst_len = il->ucode_init.len; + data_len = il->ucode_init_data.len; + + il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); + il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); + il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _il_wr_prph(il, reg_offset, le32_to_cpu(*image)); + + ret = il4965_verify_bsm(il); + if (ret) + return ret; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0); + il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND); + il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = il_rd_prph(il, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + D_INFO("BSM write complete, poll %d iterations\n", i); + else { + IL_ERR("BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. 
when powering back up after power-save shutdown) */ + il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); + + return 0; +} + +/** + * il4965_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int +il4965_set_ucode_ptrs(struct il_priv *il) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int ret = 0; + + /* bits 35:4 for 4965 */ + pinst = il->ucode_code.p_addr >> 4; + pdata = il->ucode_data_backup.p_addr >> 4; + + /* Tell bootstrap uCode where to find image to load */ + il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); + il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); + il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, + il->ucode_code.len | BSM_DRAM_INST_LOAD); + D_INFO("Runtime uCode pointers are set.\n"); + + return ret; +} + +/** + * il4965_init_alive_start - Called after N_ALIVE notification received + * + * Called after N_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in il + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void +il4965_init_alive_start(struct il_priv *il) +{ + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (il4965_verify_ucode(il)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + D_INFO("Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Calculate temperature */ + il->temperature = il4965_hw_get_temperature(il); + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. 
*/ + D_INFO("Initialization Alive received.\n"); + if (il4965_set_ucode_ptrs(il)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + D_INFO("Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + +restart: + queue_work(il->workqueue, &il->restart); +} + +static bool +iw4965_is_ht40_channel(__le32 rxon_flags) +{ + int chan_mod = + le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >> + RXON_FLG_CHANNEL_MODE_POS; + return (chan_mod == CHANNEL_MODE_PURE_40 || + chan_mod == CHANNEL_MODE_MIXED); +} + +static void +il4965_nic_config(struct il_priv *il) +{ + unsigned long flags; + u16 radio_cfg; + + spin_lock_irqsave(&il->lock, flags); + + radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + il->calib_info = + (struct il_eeprom_calib_info *) + il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET); + + spin_unlock_irqrestore(&il->lock, flags); +} + +/* Reset differential Rx gains in NIC to prepare for chain noise calibration. + * Called after every association, but this runs only once! + * ... once chain noise is calibrated the first time, it's good forever. */ +static void +il4965_chain_noise_reset(struct il_priv *il) +{ + struct il_chain_noise_data *data = &(il->chain_noise_data); + + if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) { + struct il_calib_diff_gain_cmd cmd; + + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = 0; + cmd.diff_gain_b = 0; + cmd.diff_gain_c = 0; + if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd)) + IL_ERR("Could not send C_PHY_CALIBRATION\n"); + data->state = IL_CHAIN_NOISE_ACCUMULATE; + D_CALIB("Run chain_noise_calibrate\n"); + } +} + +static struct il_sensitivity_ranges il4965_sensitivity = { + .min_nrg_cck = 97, + .max_nrg_cck = 0, /* not used, set to 0 */ + + .auto_corr_min_ofdm = 85, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 140, + .auto_corr_max_ofdm_mrc_x1 = 270, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 200, + .auto_corr_max_cck_mrc = 400, + + .nrg_th_cck = 100, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void +il4965_set_ct_threshold(struct il_priv *il) +{ + /* want Kelvin */ + il->hw_params.ct_kill_threshold = + CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY); +} + +/** + * il4965_hw_set_hw_params + * + * Called when initializing driver + */ +static int +il4965_hw_set_hw_params(struct il_priv *il) +{ + if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES && + il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES) + il->cfg->base_params->num_of_queues = + 
il->cfg->mod_params->num_of_queues; + + il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues; + il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; + il->hw_params.scd_bc_tbls_size = + il->cfg->base_params->num_of_queues * + sizeof(struct il4965_scd_bc_tbl); + il->hw_params.tfd_size = sizeof(struct il_tfd); + il->hw_params.max_stations = IL4965_STATION_COUNT; + il->ctx.bcast_sta_id = IL4965_BROADCAST_ID; + il->hw_params.max_data_size = IL49_RTC_DATA_SIZE; + il->hw_params.max_inst_size = IL49_RTC_INST_SIZE; + il->hw_params.max_bsm_size = BSM_SRAM_SIZE; + il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); + + il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR; + + il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant); + il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant); + il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant; + il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant; + + il4965_set_ct_threshold(il); + + il->hw_params.sens = &il4965_sensitivity; + il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS; + + return 0; +} + +static s32 +il4965_math_div_round(s32 num, s32 denom, s32 * res) +{ + s32 sign = 1; + + if (num < 0) { + sign = -sign; + num = -num; + } + if (denom < 0) { + sign = -sign; + denom = -denom; + } + *res = 1; + *res = ((num * 2 + denom) / (denom * 2)) * sign; + + return 1; +} + +/** + * il4965_get_voltage_compensation - Power supply voltage comp for txpower + * + * Determines power supply voltage compensation for txpower calculations. + * Returns number of 1/2-dB steps to subtract from gain table idx, + * to compensate for difference between power supply voltage during + * factory measurements, vs. current power supply voltage. + * + * Voltage indication is higher for lower voltage. + * Lower voltage requires more gain (lower gain table idx). 
+ */ +static s32 +il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage) +{ + s32 comp = 0; + + if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage || + TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage) + return 0; + + il4965_math_div_round(current_voltage - eeprom_voltage, + TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp); + + if (current_voltage > eeprom_voltage) + comp *= 2; + if ((comp < -2) || (comp > 2)) + comp = 0; + + return comp; +} + +static s32 +il4965_get_tx_atten_grp(u16 channel) +{ + if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH && + channel <= CALIB_IL_TX_ATTEN_GR5_LCH) + return CALIB_CH_GROUP_5; + + if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH && + channel <= CALIB_IL_TX_ATTEN_GR1_LCH) + return CALIB_CH_GROUP_1; + + if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH && + channel <= CALIB_IL_TX_ATTEN_GR2_LCH) + return CALIB_CH_GROUP_2; + + if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH && + channel <= CALIB_IL_TX_ATTEN_GR3_LCH) + return CALIB_CH_GROUP_3; + + if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH && + channel <= CALIB_IL_TX_ATTEN_GR4_LCH) + return CALIB_CH_GROUP_4; + + return -EINVAL; +} + +static u32 +il4965_get_sub_band(const struct il_priv *il, u32 channel) +{ + s32 b = -1; + + for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { + if (il->calib_info->band_info[b].ch_from == 0) + continue; + + if (channel >= il->calib_info->band_info[b].ch_from && + channel <= il->calib_info->band_info[b].ch_to) + break; + } + + return b; +} + +static s32 +il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + s32 val; + + if (x2 == x1) + return y1; + else { + il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); + return val + y2; + } +} + +/** + * il4965_interpolate_chan - Interpolate factory measurements for one channel + * + * Interpolates factory measurements from the two sample channels within a + * sub-band, to apply to channel of interest. Interpolation is proportional to + * differences in channel frequencies, which is proportional to differences + * in channel number. + */ +static int +il4965_interpolate_chan(struct il_priv *il, u32 channel, + struct il_eeprom_calib_ch_info *chan_info) +{ + s32 s = -1; + u32 c; + u32 m; + const struct il_eeprom_calib_measure *m1; + const struct il_eeprom_calib_measure *m2; + struct il_eeprom_calib_measure *omeas; + u32 ch_i1; + u32 ch_i2; + + s = il4965_get_sub_band(il, channel); + if (s >= EEPROM_TX_POWER_BANDS) { + IL_ERR("Tx Power can not find channel %d\n", channel); + return -1; + } + + ch_i1 = il->calib_info->band_info[s].ch1.ch_num; + ch_i2 = il->calib_info->band_info[s].ch2.ch_num; + chan_info->ch_num = (u8) channel; + + D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s, + ch_i1, ch_i2); + + for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { + for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { + m1 = &(il->calib_info->band_info[s].ch1. + measurements[c][m]); + m2 = &(il->calib_info->band_info[s].ch2. 
+ measurements[c][m]); + omeas = &(chan_info->measurements[c][m]); + + omeas->actual_pow = + (u8) il4965_interpolate_value(channel, ch_i1, + m1->actual_pow, ch_i2, + m2->actual_pow); + omeas->gain_idx = + (u8) il4965_interpolate_value(channel, ch_i1, + m1->gain_idx, ch_i2, + m2->gain_idx); + omeas->temperature = + (u8) il4965_interpolate_value(channel, ch_i1, + m1->temperature, + ch_i2, + m2->temperature); + omeas->pa_det = + (s8) il4965_interpolate_value(channel, ch_i1, + m1->pa_det, ch_i2, + m2->pa_det); + + D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, + m, m1->actual_pow, m2->actual_pow, + omeas->actual_pow); + D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, + m, m1->gain_idx, m2->gain_idx, + omeas->gain_idx); + D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, + m, m1->pa_det, m2->pa_det, omeas->pa_det); + D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c, + m, m1->temperature, m2->temperature, + omeas->temperature); + } + } + + return 0; +} + +/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, + * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ +static s32 back_off_table[] = { + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ + 10 /* CCK */ +}; + +/* Thermal compensation values for txpower for various frequency ranges ... + * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ +static struct il4965_txpower_comp_entry { + s32 degrees_per_05db_a; + s32 degrees_per_05db_a_denom; +} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { + { + 9, 2}, /* group 0 5.2, ch 34-43 */ + { + 4, 1}, /* group 1 5.2, ch 44-70 */ + { + 4, 1}, /* group 2 5.2, ch 71-124 */ + { + 4, 1}, /* group 3 5.2, ch 125-200 */ + { + 3, 1} /* group 4 2.4, ch all */ +}; + +static s32 +get_min_power_idx(s32 rate_power_idx, u32 band) +{ + if (!band) { + if ((rate_power_idx & 7) <= 4) + return MIN_TX_GAIN_IDX_52GHZ_EXT; + } + return MIN_TX_GAIN_IDX; +} + +struct gain_entry { + u8 dsp; + u8 radio; +}; + +static const struct gain_entry gain_table[2][108] = { + /* 5.2GHz power gain idx table */ + { + {123, 0x3F}, /* highest txpower */ + {117, 0x3F}, + {110, 0x3F}, + {104, 0x3F}, + {98, 0x3F}, + {110, 0x3E}, + {104, 0x3E}, + {98, 0x3E}, + {110, 0x3D}, + {104, 0x3D}, + {98, 0x3D}, + {110, 0x3C}, + {104, 0x3C}, + {98, 0x3C}, + {110, 0x3B}, + {104, 0x3B}, + {98, 0x3B}, + {110, 0x3A}, + {104, 0x3A}, + {98, 0x3A}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x25}, + {104, 0x25}, + {98, 0x25}, + {110, 0x24}, + {104, 0x24}, + {98, 0x24}, + {110, 0x23}, + {104, 0x23}, + {98, 0x23}, + {110, 0x22}, + {104, 0x18}, + {98, 0x18}, + {110, 0x17}, + {104, 0x17}, + {98, 0x17}, + {110, 0x16}, + {104, 0x16}, + {98, 0x16}, + {110, 0x15}, + {104, 0x15}, + {98, 0x15}, + {110, 0x14}, + {104, 0x14}, + {98, 0x14}, + {110, 0x13}, + {104, 0x13}, + {98, 0x13}, + {110, 0x12}, + {104, 0x08}, + {98, 0x08}, + {110, 0x07}, + {104, 0x07}, + {98, 0x07}, + {110, 0x06}, + {104, 0x06}, + {98, 0x06}, + {110, 0x05}, + 
{104, 0x05}, + {98, 0x05}, + {110, 0x04}, + {104, 0x04}, + {98, 0x04}, + {110, 0x03}, + {104, 0x03}, + {98, 0x03}, + {110, 0x02}, + {104, 0x02}, + {98, 0x02}, + {110, 0x01}, + {104, 0x01}, + {98, 0x01}, + {110, 0x00}, + {104, 0x00}, + {98, 0x00}, + {93, 0x00}, + {88, 0x00}, + {83, 0x00}, + {78, 0x00}, + }, + /* 2.4GHz power gain idx table */ + { + {110, 0x3f}, /* highest txpower */ + {104, 0x3f}, + {98, 0x3f}, + {110, 0x3e}, + {104, 0x3e}, + {98, 0x3e}, + {110, 0x3d}, + {104, 0x3d}, + {98, 0x3d}, + {110, 0x3c}, + {104, 0x3c}, + {98, 0x3c}, + {110, 0x3b}, + {104, 0x3b}, + {98, 0x3b}, + {110, 0x3a}, + {104, 0x3a}, + {98, 0x3a}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x6}, + {104, 0x6}, + {98, 0x6}, + {110, 0x5}, + {104, 0x5}, + {98, 0x5}, + {110, 0x4}, + {104, 0x4}, + {98, 0x4}, + {110, 0x3}, + {104, 0x3}, + {98, 0x3}, + {110, 0x2}, + {104, 0x2}, + {98, 0x2}, + {110, 0x1}, + {104, 0x1}, + {98, 0x1}, + {110, 0x0}, + {104, 0x0}, + {98, 0x0}, + {97, 0}, + {96, 0}, + {95, 0}, + {94, 0}, + {93, 0}, + {92, 0}, + {91, 0}, + {90, 0}, + {89, 0}, + {88, 0}, + {87, 0}, + {86, 0}, + {85, 0}, + {84, 0}, + {83, 0}, + {82, 0}, + {81, 0}, + {80, 0}, + {79, 0}, + {78, 0}, + {77, 0}, + {76, 0}, + {75, 0}, + {74, 0}, + {73, 0}, + {72, 0}, + {71, 0}, + {70, 0}, + {69, 0}, + {68, 0}, + {67, 0}, + {66, 0}, + {65, 0}, + {64, 0}, + {63, 0}, + {62, 0}, + {61, 0}, + {60, 0}, + {59, 0}, + } +}; + +static int +il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40, + u8 ctrl_chan_high, + struct il4965_tx_power_db *tx_power_tbl) +{ + u8 saturation_power; + s32 target_power; + s32 user_target_power; + s32 power_limit; + s32 current_temp; + s32 reg_limit; + s32 current_regulatory; + s32 txatten_grp = CALIB_CH_GROUP_MAX; + int i; + int c; + const struct il_channel_info *ch_info = NULL; + struct il_eeprom_calib_ch_info ch_eeprom_info; + const struct il_eeprom_calib_measure *measurement; + s16 voltage; + s32 init_voltage; + s32 voltage_compensation; + s32 degrees_per_05db_num; + s32 degrees_per_05db_denom; + s32 factory_temp; + s32 temperature_comp[2]; + s32 factory_gain_idx[2]; + s32 factory_actual_pwr[2]; + s32 power_idx; + + /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units + * are used for idxing into txpower table) */ + user_target_power = 2 * il->tx_power_user_lmt; + + /* Get current (RXON) channel, band, width */ + D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40); + + ch_info = il_get_channel_info(il, il->band, channel); + + if (!il_is_channel_valid(ch_info)) + return -EINVAL; + + /* get txatten group, used to select 1) thermal txpower adjustment + * and 2) mimo txpower balance between Tx chains. */ + txatten_grp = il4965_get_tx_atten_grp(channel); + if (txatten_grp < 0) { + IL_ERR("Can't find txatten group for channel %d.\n", channel); + return txatten_grp; + } + + D_TXPOWER("channel %d belongs to txatten group %d\n", channel, + txatten_grp); + + if (is_ht40) { + if (ctrl_chan_high) + channel -= 2; + else + channel += 2; + } + + /* hardware txpower limits ... 
+ * saturation (clipping distortion) txpowers are in half-dBm */ + if (band) + saturation_power = il->calib_info->saturation_power24; + else + saturation_power = il->calib_info->saturation_power52; + + if (saturation_power < IL_TX_POWER_SATURATION_MIN || + saturation_power > IL_TX_POWER_SATURATION_MAX) { + if (band) + saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24; + else + saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52; + } + + /* regulatory txpower limits ... reg_limit values are in half-dBm, + * max_power_avg values are in dBm, convert * 2 */ + if (is_ht40) + reg_limit = ch_info->ht40_max_power_avg * 2; + else + reg_limit = ch_info->max_power_avg * 2; + + if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) || + (reg_limit > IL_TX_POWER_REGULATORY_MAX)) { + if (band) + reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24; + else + reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52; + } + + /* Interpolate txpower calibration values for this channel, + * based on factory calibration tests on spaced channels. */ + il4965_interpolate_chan(il, channel, &ch_eeprom_info); + + /* calculate tx gain adjustment based on power supply voltage */ + voltage = le16_to_cpu(il->calib_info->voltage); + init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage); + voltage_compensation = + il4965_get_voltage_compensation(voltage, init_voltage); + + D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage, + voltage, voltage_compensation); + + /* get current temperature (Celsius) */ + current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN); + current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX); + current_temp = KELVIN_TO_CELSIUS(current_temp); + + /* select thermal txpower adjustment params, based on channel group + * (same frequency group used for mimo txatten adjustment) */ + degrees_per_05db_num = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; + degrees_per_05db_denom = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; + + /* get per-chain txpower values from factory measurements */ + for (c = 0; c < 2; c++) { + measurement = &ch_eeprom_info.measurements[c][1]; + + /* txgain adjustment (in half-dB steps) based on difference + * between factory and current temperature */ + factory_temp = measurement->temperature; + il4965_math_div_round((current_temp - + factory_temp) * degrees_per_05db_denom, + degrees_per_05db_num, + &temperature_comp[c]); + + factory_gain_idx[c] = measurement->gain_idx; + factory_actual_pwr[c] = measurement->actual_pow; + + D_TXPOWER("chain = %d\n", c); + D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n", + factory_temp, current_temp, temperature_comp[c]); + + D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c], + factory_actual_pwr[c]); + } + + /* for each of 33 bit-rates (including 1 for CCK) */ + for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) { + u8 is_mimo_rate; + union il4965_tx_power_dual_stream tx_power; + + /* for mimo, reduce each chain's txpower by half + * (3dB, 6 steps), so total output power is regulatory + * compliant. 
*/ + if (i & 0x8) { + current_regulatory = + reg_limit - + IL_TX_POWER_MIMO_REGULATORY_COMPENSATION; + is_mimo_rate = 1; + } else { + current_regulatory = reg_limit; + is_mimo_rate = 0; + } + + /* find txpower limit, either hardware or regulatory */ + power_limit = saturation_power - back_off_table[i]; + if (power_limit > current_regulatory) + power_limit = current_regulatory; + + /* reduce user's txpower request if necessary + * for this rate on this channel */ + target_power = user_target_power; + if (target_power > power_limit) + target_power = power_limit; + + D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i, + saturation_power - back_off_table[i], + current_regulatory, user_target_power, target_power); + + /* for each of 2 Tx chains (radio transmitters) */ + for (c = 0; c < 2; c++) { + s32 atten_value; + + if (is_mimo_rate) + atten_value = + (s32) le32_to_cpu(il->card_alive_init. + tx_atten[txatten_grp][c]); + else + atten_value = 0; + + /* calculate idx; higher idx means lower txpower */ + power_idx = + (u8) (factory_gain_idx[c] - + (target_power - factory_actual_pwr[c]) - + temperature_comp[c] - voltage_compensation + + atten_value); + +/* D_TXPOWER("calculated txpower idx %d\n", + power_idx); */ + + if (power_idx < get_min_power_idx(i, band)) + power_idx = get_min_power_idx(i, band); + + /* adjust 5 GHz idx to support negative idxes */ + if (!band) + power_idx += 9; + + /* CCK, rate 32, reduce txpower for CCK */ + if (i == POWER_TBL_CCK_ENTRY) + power_idx += + IL_TX_POWER_CCK_COMPENSATION_C_STEP; + + /* stay within the table! */ + if (power_idx > 107) { + IL_WARN("txpower idx %d > 107\n", power_idx); + power_idx = 107; + } + if (power_idx < 0) { + IL_WARN("txpower idx %d < 0\n", power_idx); + power_idx = 0; + } + + /* fill txpower command for this rate/chain */ + tx_power.s.radio_tx_gain[c] = + gain_table[band][power_idx].radio; + tx_power.s.dsp_predis_atten[c] = + gain_table[band][power_idx].dsp; + + D_TXPOWER("chain %d mimo %d idx %d " + "gain 0x%02x dsp %d\n", c, atten_value, + power_idx, tx_power.s.radio_tx_gain[c], + tx_power.s.dsp_predis_atten[c]); + } /* for each chain */ + + tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); + + } /* for each rate */ + + return 0; +} + +/** + * il4965_send_tx_power - Configure the TXPOWER level user limit + * + * Uses the active RXON for channel, band, and characteristics (ht40, high) + * The power limit is taken from il->tx_power_user_lmt. 
+ */ +static int +il4965_send_tx_power(struct il_priv *il) +{ + struct il4965_txpowertable_cmd cmd = { 0 }; + int ret; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + struct il_rxon_context *ctx = &il->ctx; + + if (WARN_ONCE + (test_bit(S_SCAN_HW, &il->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + band = il->band == IEEE80211_BAND_2GHZ; + + is_ht40 = iw4965_is_ht40_channel(ctx->active.flags); + + if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.channel = ctx->active.channel; + + ret = + il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel), + is_ht40, ctrl_chan_high, &cmd.tx_power); + if (ret) + goto out; + + ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd); + +out: + return ret; +} + +static int +il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx) +{ + int ret = 0; + struct il4965_rxon_assoc_cmd rxon_assoc; + const struct il_rxon_cmd *rxon1 = &ctx->staging; + const struct il_rxon_cmd *rxon2 = &ctx->active; + + if (rxon1->flags == rxon2->flags && + rxon1->filter_flags == rxon2->filter_flags && + rxon1->cck_basic_rates == rxon2->cck_basic_rates && + rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates && + rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates && + rxon1->rx_chain == rxon2->rx_chain && + rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) { + D_INFO("Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + ctx->staging.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + ctx->staging.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; + + ret = + il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc), + &rxon_assoc, NULL); + + return ret; +} + +static int +il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct il_rxon_cmd *active_rxon = (void *)&ctx->active; + int ret; + bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); + + if (!il_is_alive(il)) + return -EBUSY; + + if (!ctx->is_active) + return 0; + + /* always get timestamp with Rx frame */ + ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; + + ret = il_check_rxon_cmd(il, ctx); + if (ret) { + IL_ERR("Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* + * receive commit_rxon request + * abort any previous channel switch if still in process + */ + if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) && + il->switch_channel != ctx->staging.channel) { + D_11H("abort channel switch on %d\n", + le16_to_cpu(il->switch_channel)); + il_chswitch_done(il, false); + } + + /* If we don't need to send a full RXON, we can use + * il_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. 
*/ + if (!il_full_rxon_required(il, ctx)) { + ret = il_send_rxon_assoc(il, ctx); + if (ret) { + IL_ERR("Error setting RXON_ASSOC (%d)\n", ret); + return ret; + } + + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + il_print_rx_config_cmd(il, ctx); + /* + * We do not commit tx power settings while channel changing, + * do it now if tx power changed. + */ + il_set_tx_power(il, il->tx_power_next, false); + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (il_is_associated_ctx(ctx) && new_assoc) { + D_INFO("Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + ret = + il_send_cmd_pdu(il, ctx->rxon_cmd, + sizeof(struct il_rxon_cmd), active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (ret) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret); + return ret; + } + il_clear_ucode_stations(il, ctx); + il_restore_stations(il, ctx); + ret = il4965_restore_default_wep_keys(il, ctx); + if (ret) { + IL_ERR("Failed to restore WEP keys (%d)\n", ret); + return ret; + } + } + + D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"), + le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr); + + il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto); + + /* Apply the new configuration + * RXON unassoc clears the station table in uCode so restoration of + * stations is needed after it (the RXON command) completes + */ + if (!new_assoc) { + ret = + il_send_cmd_pdu(il, ctx->rxon_cmd, + sizeof(struct il_rxon_cmd), &ctx->staging); + if (ret) { + IL_ERR("Error setting new RXON (%d)\n", ret); + return ret; + } + D_INFO("Return from !new_assoc RXON.\n"); + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + il_clear_ucode_stations(il, ctx); + il_restore_stations(il, ctx); + ret = il4965_restore_default_wep_keys(il, ctx); + if (ret) { + IL_ERR("Failed to restore WEP keys (%d)\n", ret); + return ret; + } + } + if (new_assoc) { + il->start_calib = 0; + /* Apply the new configuration + * RXON assoc doesn't clear the station table in uCode, + */ + ret = + il_send_cmd_pdu(il, ctx->rxon_cmd, + sizeof(struct il_rxon_cmd), &ctx->staging); + if (ret) { + IL_ERR("Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + } + il_print_rx_config_cmd(il, ctx); + + il4965_init_sensitivity(il); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + ret = il_set_tx_power(il, il->tx_power_next, true); + if (ret) { + IL_ERR("Error sending TX power (%d)\n", ret); + return ret; + } + + return 0; +} + +static int +il4965_hw_channel_switch(struct il_priv *il, + struct ieee80211_channel_switch *ch_switch) +{ + struct il_rxon_context *ctx = &il->ctx; + int rc; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + struct il4965_channel_switch_cmd cmd; + const struct il_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + band = il->band == 
IEEE80211_BAND_2GHZ; + + is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags); + + if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.expect_beacon = 0; + ch = ch_switch->channel->hw_value; + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if (il->ucode_beacon_time > tsf_low && beacon_interval) { + if (switch_count > + ((il->ucode_beacon_time - tsf_low) / beacon_interval)) { + switch_count -= + (il->ucode_beacon_time - tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(il->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = + il_usecs_to_beacons(il, switch_time_in_usec, + beacon_interval); + cmd.switch_time = + il_add_beacon_time(il, il->ucode_beacon_time, + ucode_switch_time, beacon_interval); + } + D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time); + ch_info = il_get_channel_info(il, il->band, ch); + if (ch_info) + cmd.expect_beacon = il_is_channel_radar(ch_info); + else { + IL_ERR("invalid channel switch from %u to %u\n", + ctx->active.channel, ch); + return -EFAULT; + } + + rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high, + &cmd.tx_power); + if (rc) { + D_11H("error:%d fill txpower_tbl\n", rc); + return rc; + } + + return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd); +} + +/** + * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void +il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq, + u16 byte_cnt) +{ + struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int write_ptr = txq->q.write_ptr; + int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE; + __le16 bc_ent; + + WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); + + bc_ent = cpu_to_le16(len & 0xFFF); + /* Set up byte count within first 256 entries */ + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + /* If within first 64 entries, duplicate at end */ + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = + bc_ent; +} + +/** + * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin) + * @stats: Provides the temperature reading from the uCode + * + * A return of <0 indicates bogus data in the stats + */ +static int +il4965_hw_get_temperature(struct il_priv *il) +{ + s32 temperature; + s32 vt; + s32 R1, R2, R3; + u32 R4; + + if (test_bit(S_TEMPERATURE, &il->status) && + (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) { + D_TEMP("Running HT40 temperature calibration\n"); + R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]); + R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]); + R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]); + R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]); + } else { + D_TEMP("Running temperature calibration\n"); + R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]); + R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]); + R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]); + R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]); + } + + /* + * Temperature is only 23 bits, so sign 
extend out to 32. + * + * NOTE: If we haven't received a stats notification yet + * with an updated temperature, use R4 provided to us in the + * "initialize" ALIVE response. + */ + if (!test_bit(S_TEMPERATURE, &il->status)) + vt = sign_extend32(R4, 23); + else + vt = sign_extend32(le32_to_cpu + (il->_4965.stats.general.common.temperature), + 23); + + D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); + + if (R3 == R1) { + IL_ERR("Calibration conflict R1 == R3\n"); + return -1; + } + + /* Calculate temperature in degrees Kelvin, adjust by 97%. + * Add offset to center the adjustment around 0 degrees Centigrade. */ + temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); + temperature /= (R3 - R1); + temperature = + (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; + + D_TEMP("Calibrated temperature: %dK, %dC\n", temperature, + KELVIN_TO_CELSIUS(temperature)); + + return temperature; +} + +/* Adjust Txpower only if temperature variance is greater than threshold. */ +#define IL_TEMPERATURE_THRESHOLD 3 + +/** + * il4965_is_temp_calib_needed - determines if new calibration is needed + * + * If the temperature has changed sufficiently, then a recalibration + * is needed. + * + * Assumes caller will replace il->last_temperature once calibration + * is executed. + */ +static int +il4965_is_temp_calib_needed(struct il_priv *il) +{ + int temp_diff; + + if (!test_bit(S_STATS, &il->status)) { + D_TEMP("Temperature not updated -- no stats.\n"); + return 0; + } + + temp_diff = il->temperature - il->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + D_POWER("Getting cooler, delta %d\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + D_POWER("Temperature unchanged\n"); + else + D_POWER("Getting warmer, delta %d\n", temp_diff); + + if (temp_diff < IL_TEMPERATURE_THRESHOLD) { + D_POWER(" => thermal txpower calib not needed\n"); + return 0; + } + + D_POWER(" => thermal txpower calib needed\n"); + + return 1; +} + +static void +il4965_temperature_calib(struct il_priv *il) +{ + s32 temp; + + temp = il4965_hw_get_temperature(il); + if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp)) + return; + + if (il->temperature != temp) { + if (il->temperature) + D_TEMP("Temperature changed " "from %dC to %dC\n", + KELVIN_TO_CELSIUS(il->temperature), + KELVIN_TO_CELSIUS(temp)); + else + D_TEMP("Temperature " "initialized to %dC\n", + KELVIN_TO_CELSIUS(temp)); + } + + il->temperature = temp; + set_bit(S_TEMPERATURE, &il->status); + + if (!il->disable_tx_power_cal && + unlikely(!test_bit(S_SCANNING, &il->status)) && + il4965_is_temp_calib_needed(il)) + queue_work(il->workqueue, &il->txpower_work); +} + +static u16 +il4965_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case C_RXON: + return (u16) sizeof(struct il4965_rxon_cmd); + default: + return len; + } +} + +static u16 +il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data) +{ + struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cmd->tid_disable_tx; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + addsta->sleep_tx_count = cmd->sleep_tx_count; + 
addsta->reserved1 = cpu_to_le16(0); + addsta->reserved2 = cpu_to_le16(0); + + return (u16) sizeof(struct il4965_addsta_cmd); +} + +static inline u32 +il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp) +{ + return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; +} + +static inline u32 +il4965_tx_status_to_mac80211(u32 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + return IEEE80211_TX_STAT_ACK; + case TX_STATUS_FAIL_DEST_PS: + return IEEE80211_TX_STAT_TX_FILTERED; + default: + return 0; + } +} + +static inline bool +il4965_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE); +} + +/** + * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue + */ +static int +il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg, + struct il4965_tx_resp *tx_resp, int txq_id, + u16 start_idx) +{ + u16 status; + struct agg_tx_status *frame_status = tx_resp->u.agg_status; + struct ieee80211_tx_info *info = NULL; + struct ieee80211_hdr *hdr = NULL; + u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + int i, sh, idx; + u16 seq; + if (agg->wait_for_ba) + D_TX_REPLY("got tx response w/o block-ack\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = rate_n_flags; + agg->bitmap = 0; + + /* num frames attempted by Tx command */ + if (agg->frame_count == 1) { + /* Only one frame was attempted; no block-ack will arrive */ + status = le16_to_cpu(frame_status[0].status); + idx = start_idx; + + D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n", + agg->frame_count, agg->start_idx, idx); + + info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb); + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + info->flags |= il4965_tx_status_to_mac80211(status); + il4965_hwrate_to_tx_control(il, rate_n_flags, info); + + D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff, + tx_resp->failure_frame); + D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags); + + agg->wait_for_ba = 0; + } else { + /* Two or more frames were attempted; expect block-ack */ + u64 bitmap = 0; + int start = agg->start_idx; + + /* Construct bit-map of pending frames within Tx win */ + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le16_to_cpu(frame_status[i].status); + seq = le16_to_cpu(frame_status[i].sequence); + idx = SEQ_TO_IDX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & + (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = il_tx_queue_get_hdr(il, txq_id, idx); + if (!hdr) { + IL_ERR("BUG_ON idx doesn't point to valid skb" + " idx=%d, txq_id=%d\n", idx, txq_id); + return -1; + } + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IL_ERR("BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", idx, + SEQ_TO_SN(sc), hdr->seq_ctrl); + return -1; + } + + D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx, + SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= 1ULL << sh; + D_TX_REPLY("start=%d bitmap=0x%llx\n", start, + (unsigned long 
long)bitmap); + } + + agg->bitmap = bitmap; + agg->start_idx = start; + D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n", + agg->frame_count, agg->start_idx, + (unsigned long long)agg->bitmap); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} + +static u8 +il4965_find_station(struct il_priv *il, const u8 * addr) +{ + int i; + int start = 0; + int ret = IL_INVALID_STATION; + unsigned long flags; + + if ((il->iw_mode == NL80211_IFTYPE_ADHOC)) + start = IL_STA_ID; + + if (is_broadcast_ether_addr(addr)) + return il->ctx.bcast_sta_id; + + spin_lock_irqsave(&il->sta_lock, flags); + for (i = start; i < il->hw_params.max_stations; i++) + if (il->stations[i].used && + (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) { + ret = i; + goto out; + } + + D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations); + +out: + /* + * It may be possible that more commands interacting with stations + * arrive before we completed processing the adding of + * station + */ + if (ret != IL_INVALID_STATION && + (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) || + ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) && + (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) { + IL_ERR("Requested station info for sta %d before ready.\n", + ret); + ret = IL_INVALID_STATION; + } + spin_unlock_irqrestore(&il->sta_lock, flags); + return ret; +} + +static int +il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr) +{ + if (il->iw_mode == NL80211_IFTYPE_STATION) { + return IL_AP_ID; + } else { + u8 *da = ieee80211_get_DA(hdr); + return il4965_find_station(il, da); + } +} + +/** + * il4965_hdl_tx - Handle standard (non-aggregation) Tx response + */ +static void +il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int idx = SEQ_TO_IDX(sequence); + struct il_tx_queue *txq = &il->txq[txq_id]; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info; + struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->u.status); + int uninitialized_var(tid); + int sta_id; + int freed; + u8 *qc = NULL; + unsigned long flags; + + if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { + IL_ERR("Read idx for DMA queue txq_id (%d) idx %d " + "is out of range [0-%d] %d %d\n", txq_id, idx, + txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); + return; + } + + txq->time_stamp = jiffies; + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); + memset(&info->status, 0, sizeof(info->status)); + + hdr = il_tx_queue_get_hdr(il, txq_id, idx); + if (ieee80211_is_data_qos(hdr->frame_control)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + sta_id = il4965_get_ra_sta_id(il, hdr); + if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) { + IL_ERR("Station not known\n"); + return; + } + + spin_lock_irqsave(&il->sta_lock, flags); + if (txq->sched_retry) { + const u32 scd_ssn = il4965_get_scd_ssn(tx_resp); + struct il_ht_agg *agg = NULL; + WARN_ON(!qc); + + agg = &il->stations[sta_id].tid[tid].agg; + + il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx); + + /* check if BAR is needed */ + if ((tx_resp->frame_count == 1) && + !il4965_is_tx_success(status)) + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + + if (txq->q.read_ptr != (scd_ssn & 0xff)) { + idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); + D_TX_REPLY("Retry scheduler reclaim scd_ssn " + "%d idx %d\n", scd_ssn, idx); + freed = 
il4965_tx_queue_reclaim(il, txq_id, idx); + if (qc) + il4965_free_tfds_in_queue(il, sta_id, tid, + freed); + + if (il->mac80211_registered && + il_queue_space(&txq->q) > txq->q.low_mark && + agg->state != IL_EMPTYING_HW_QUEUE_DELBA) + il_wake_queue(il, txq); + } + } else { + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags |= il4965_tx_status_to_mac80211(status); + il4965_hwrate_to_tx_control(il, + le32_to_cpu(tx_resp->rate_n_flags), + info); + + D_TX_REPLY("TXQ %d status %s (0x%08x) " + "rate_n_flags 0x%x retries %d\n", txq_id, + il4965_get_tx_fail_reason(status), status, + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + freed = il4965_tx_queue_reclaim(il, txq_id, idx); + if (qc && likely(sta_id != IL_INVALID_STATION)) + il4965_free_tfds_in_queue(il, sta_id, tid, freed); + else if (sta_id == IL_INVALID_STATION) + D_TX_REPLY("Station not known\n"); + + if (il->mac80211_registered && + il_queue_space(&txq->q) > txq->q.low_mark) + il_wake_queue(il, txq); + } + if (qc && likely(sta_id != IL_INVALID_STATION)) + il4965_txq_check_empty(il, sta_id, tid, txq_id); + + il4965_check_abort_status(il, tx_resp->frame_count, status); + + spin_unlock_irqrestore(&il->sta_lock, flags); +} + +/* Set up 4965-specific Rx frame reply handlers */ +static void +il4965_handler_setup(struct il_priv *il) +{ + /* Legacy Rx frames */ + il->handlers[N_RX] = il4965_hdl_rx; + /* Tx response */ + il->handlers[C_TX] = il4965_hdl_tx; +} + +static struct il_hcmd_ops il4965_hcmd = { + .rxon_assoc = il4965_send_rxon_assoc, + .commit_rxon = il4965_commit_rxon, + .set_rxon_chain = il4965_set_rxon_chain, +}; + +static void +il4965_post_scan(struct il_priv *il) +{ + struct il_rxon_context *ctx = &il->ctx; + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + il_commit_rxon(il, ctx); +} + +static void +il4965_post_associate(struct il_priv *il) +{ + struct il_rxon_context *ctx = &il->ctx; + struct ieee80211_vif *vif = ctx->vif; + struct ieee80211_conf *conf = NULL; + int ret = 0; + + if (!vif || !il->is_open) + return; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + il_scan_cancel_timeout(il, 200); + + conf = &il->hw->conf; + + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + il_commit_rxon(il, ctx); + + ret = il_send_rxon_timing(il, ctx); + if (ret) + IL_WARN("RXON timing - " "Attempting to continue.\n"); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + + il_set_rxon_ht(il, &il->current_ht_config); + + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, ctx); + + ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); + + D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid, + vif->bss_conf.beacon_int); + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + + il_commit_rxon(il, ctx); + + D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid, + ctx->active.bssid_addr); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_ADHOC: + il4965_send_beacon_cmd(il); + break; + default: + IL_ERR("%s Should not be called in %d mode\n", __func__, + vif->type); + break; + } + + /* 
the chain noise calibration will enabled PM upon completion + * If chain noise has already been run, then we need to enable + * power management here */ + if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE) + il_power_update_mode(il, false); + + /* Enable Rx differential gain and sensitivity calibrations */ + il4965_chain_noise_reset(il); + il->start_calib = 1; +} + +static void +il4965_config_ap(struct il_priv *il) +{ + struct il_rxon_context *ctx = &il->ctx; + struct ieee80211_vif *vif = ctx->vif; + int ret = 0; + + lockdep_assert_held(&il->mutex); + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + /* The following should be done only at AP bring up */ + if (!il_is_associated_ctx(ctx)) { + + /* RXON - unassoc (to set timing command) */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + il_commit_rxon(il, ctx); + + /* RXON Timing */ + ret = il_send_rxon_timing(il, ctx); + if (ret) + IL_WARN("RXON timing failed - " + "Attempting to continue.\n"); + + /* AP has all antennas */ + il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant; + il_set_rxon_ht(il, &il->current_ht_config); + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, ctx); + + ctx->staging.assoc_id = 0; + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + /* need to send beacon cmd before committing assoc RXON! */ + il4965_send_beacon_cmd(il); + /* restore RXON assoc */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + il_commit_rxon(il, ctx); + } + il4965_send_beacon_cmd(il); +} + +static struct il_hcmd_utils_ops il4965_hcmd_utils = { + .get_hcmd_size = il4965_get_hcmd_size, + .build_addsta_hcmd = il4965_build_addsta_hcmd, + .request_scan = il4965_request_scan, + .post_scan = il4965_post_scan, +}; + +static struct il_lib_ops il4965_lib = { + .set_hw_params = il4965_hw_set_hw_params, + .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl, + .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = il4965_hw_txq_free_tfd, + .txq_init = il4965_hw_tx_queue_init, + .handler_setup = il4965_handler_setup, + .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr, + .init_alive_start = il4965_init_alive_start, + .load_ucode = il4965_load_bsm, + .dump_nic_error_log = il4965_dump_nic_error_log, + .dump_fh = il4965_dump_fh, + .set_channel_switch = il4965_hw_channel_switch, + .apm_ops = { + .init = il_apm_init, + .config = il4965_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, + EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS}, + .acquire_semaphore = il4965_eeprom_acquire_semaphore, + .release_semaphore = il4965_eeprom_release_semaphore, + }, + .send_tx_power = il4965_send_tx_power, + .update_chain_flags = il4965_update_chain_flags, + .temp_ops = { + .temperature = il4965_temperature_calib, + }, +#ifdef CONFIG_IWLEGACY_DEBUGFS + .debugfs_ops = { + .rx_stats_read = il4965_ucode_rx_stats_read, + .tx_stats_read = il4965_ucode_tx_stats_read, + .general_stats_read = il4965_ucode_general_stats_read, + }, +#endif +}; + 
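As a minimal illustrative sketch (not part of this commit; the helper name is invented for illustration, and s32 is assumed from <linux/types.h>), the temperature formula used by il4965_hw_get_temperature() — reachable through the .temp_ops.temperature hook above via il4965_temperature_calib() — can be written standalone with the calibration constants spelled out:

static inline s32
il4965_temperature_example(s32 r1, s32 r2, s32 r3, s32 vt)
{
	s32 temp;

	/* r1..r3 are factory calibration values from the "initialize
	 * alive" response; vt is the sign-extended 23-bit R4 reading.
	 * Result is in Kelvin:
	 *   K = ((259 * (vt - r2)) / (r3 - r1)) * 97 / 100 + 8
	 */
	if (r3 == r1)		/* calibration conflict: avoid divide by zero */
		return -1;

	temp = 259 * (vt - r2);		/* TEMPERATURE_CALIB_A_VAL */
	temp /= r3 - r1;
	return (temp * 97) / 100 + 8;	/* TEMPERATURE_CALIB_KELVIN_OFFSET */
}

The integer division here mirrors the s32 arithmetic in the driver, so rounding happens toward zero at each step rather than once at the end.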
+static const struct il_legacy_ops il4965_legacy_ops = { + .post_associate = il4965_post_associate, + .config_ap = il4965_config_ap, + .manage_ibss_station = il4965_manage_ibss_station, + .update_bcast_stations = il4965_update_bcast_stations, +}; + +struct ieee80211_ops il4965_hw_ops = { + .tx = il4965_mac_tx, + .start = il4965_mac_start, + .stop = il4965_mac_stop, + .add_interface = il_mac_add_interface, + .remove_interface = il_mac_remove_interface, + .change_interface = il_mac_change_interface, + .config = il_mac_config, + .configure_filter = il4965_configure_filter, + .set_key = il4965_mac_set_key, + .update_tkip_key = il4965_mac_update_tkip_key, + .conf_tx = il_mac_conf_tx, + .reset_tsf = il_mac_reset_tsf, + .bss_info_changed = il_mac_bss_info_changed, + .ampdu_action = il4965_mac_ampdu_action, + .hw_scan = il_mac_hw_scan, + .sta_add = il4965_mac_sta_add, + .sta_remove = il_mac_sta_remove, + .channel_switch = il4965_mac_channel_switch, + .tx_last_beacon = il_mac_tx_last_beacon, +}; + +static const struct il_ops il4965_ops = { + .lib = &il4965_lib, + .hcmd = &il4965_hcmd, + .utils = &il4965_hcmd_utils, + .led = &il4965_led_ops, + .legacy = &il4965_legacy_ops, + .ieee80211_ops = &il4965_hw_ops, +}; + +static struct il_base_params il4965_base_params = { + .eeprom_size = IL4965_EEPROM_IMG_SIZE, + .num_of_queues = IL49_NUM_QUEUES, + .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = true, + .led_compensation = 61, + .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS, + .wd_timeout = IL_DEF_WD_TIMEOUT, + .temperature_kelvin = true, + .ucode_tracing = true, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, +}; + +struct il_cfg il4965_cfg = { + .name = "Intel(R) Wireless WiFi Link 4965AGN", + .fw_name_pre = IL4965_FW_PRE, + .ucode_api_max = IL4965_UCODE_API_MAX, + .ucode_api_min = IL4965_UCODE_API_MIN, + .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_ABC, + .eeprom_ver = EEPROM_4965_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, + .ops = &il4965_ops, + .mod_params = &il4965_mod_params, + .base_params = &il4965_base_params, + .led_mode = IL_LED_BLINK, + /* + * Force use of chains B and C for scan RX on 5 GHz band + * because the device has off-channel reception on chain A. + */ + .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, +}; + +/* Module firmware */ +MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h new file mode 100644 index 00000000000..f280e0161b1 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/4965.h @@ -0,0 +1,1301 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __il_4965_h__ +#define __il_4965_h__ + +struct il_rx_queue; +struct il_rx_buf; +struct il_rx_pkt; +struct il_tx_queue; +struct il_rxon_context; + +/* configuration for the _4965 devices */ +extern struct il_cfg il4965_cfg; + +extern struct il_mod_params il4965_mod_params; + +extern struct ieee80211_ops il4965_hw_ops; + +/* tx queue */ +void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, + int freed); + +/* RXON */ +void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx); + +/* uCode */ +int il4965_verify_ucode(struct il_priv *il); + +/* lib */ +void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status); + +void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq); +int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq); +int il4965_hw_nic_init(struct il_priv *il); +int il4965_dump_fh(struct il_priv *il, char **buf, bool display); + +/* rx */ +void il4965_rx_queue_restock(struct il_priv *il); +void il4965_rx_replenish(struct il_priv *il); +void il4965_rx_replenish_now(struct il_priv *il); +void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq); +int il4965_rxq_stop(struct il_priv *il); +int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb); +void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb); +void il4965_rx_handle(struct il_priv *il); + +/* tx */ +void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq); +int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad); +int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); +void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, + struct ieee80211_tx_info *info); +int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb); +int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 * ssn); +int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id); +void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb); +int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx); +void il4965_hw_txq_ctx_free(struct il_priv *il); +int il4965_txq_ctx_alloc(struct il_priv *il); +void il4965_txq_ctx_reset(struct il_priv *il); +void il4965_txq_ctx_stop(struct il_priv *il); +void il4965_txq_set_sched(struct il_priv *il, u32 mask); + +/* + * Acquire il->lock before calling this function ! 
+ */ +void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx); +/** + * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue + * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed + * @scd_retry: (1) Indicates queue will be used in aggregation mode + * + * NOTE: Acquire il->lock before calling this function ! + */ +void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq, + int tx_fifo_id, int scd_retry); + +/* rx */ +void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb); +bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt); +void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb); +void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb); + +/* scan */ +int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif); + +/* station mgmt */ +int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, + bool add); + +/* hcmd */ +int il4965_send_beacon_cmd(struct il_priv *il); + +#ifdef CONFIG_IWLEGACY_DEBUG +const char *il4965_get_tx_fail_reason(u32 status); +#else +static inline const char * +il4965_get_tx_fail_reason(u32 status) +{ + return ""; +} +#endif + +/* station management */ +int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx); +int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r); +int il4965_remove_default_wep_key(struct il_priv *il, + struct il_rxon_context *ctx, + struct ieee80211_key_conf *key); +int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *key); +int il4965_restore_default_wep_keys(struct il_priv *il, + struct il_rxon_context *ctx); +int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key); +int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid); +int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, + int tid, u16 ssn); +int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, + int tid); +void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt); +int il4965_update_bcast_stations(struct il_priv *il); + +/* rate */ +static inline u8 +il4965_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} + +/* eeprom */ +void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac); +int il4965_eeprom_acquire_semaphore(struct il_priv *il); +void il4965_eeprom_release_semaphore(struct il_priv *il); +int il4965_eeprom_check_version(struct il_priv *il); + +/* mac80211 handlers (for 4965) */ +void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int il4965_mac_start(struct ieee80211_hw *hw); +void il4965_mac_stop(struct ieee80211_hw *hw); +void il4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void il4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct 
ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key); +int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 * ssn, + u8 buf_size); +int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void il4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch); + +void il4965_led_enable(struct il_priv *il); + +/* EEPROM */ +#define IL4965_EEPROM_IMG_SIZE 1024 + +/* + * uCode queue management definitions ... + * The first queue used for block-ack aggregation is #7 (4965 only). + * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. + */ +#define IL49_FIRST_AMPDU_QUEUE 7 + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ +#define IL49_RTC_INST_LOWER_BOUND (0x000000) +#define IL49_RTC_INST_UPPER_BOUND (0x018000) + +#define IL49_RTC_DATA_LOWER_BOUND (0x800000) +#define IL49_RTC_DATA_UPPER_BOUND (0x80A000) + +#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \ + IL49_RTC_INST_LOWER_BOUND) +#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \ + IL49_RTC_DATA_LOWER_BOUND) + +#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE +#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE + +static inline int +il4965_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IL49_RTC_DATA_LOWER_BOUND && + addr < IL49_RTC_DATA_UPPER_BOUND); +} + +/********************* START TEMPERATURE *************************************/ + +/** + * 4965 temperature calculation. + * + * The driver must calculate the device temperature before calculating + * a txpower setting (amplifier gain is temperature dependent). The + * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration + * values used for the life of the driver, and one of which (R4) is the + * real-time temperature indicator. + * + * uCode provides all 4 values to the driver via the "initialize alive" + * notification (see struct il4965_init_alive_resp). After the runtime uCode + * image loads, uCode updates the R4 value via stats notifications + * (see N_STATS), which occur after each received beacon + * when associated, or can be requested via C_STATS. + * + * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver + * must sign-extend to 32 bits before applying formula below. + * + * Formula: + * + * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 + * + * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is + * an additional correction, which should be centered around 0 degrees + * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for + * centering the 97/100 correction around 0 degrees K. + * + * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing + * current temperature with factory-measured temperatures when calculating + * txpower settings. 
+ */ +#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 +#define TEMPERATURE_CALIB_A_VAL 259 + +/* Limit range of calculated temperature to be between these Kelvin values */ +#define IL_TX_POWER_TEMPERATURE_MIN (263) +#define IL_TX_POWER_TEMPERATURE_MAX (410) + +#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ + ((t) < IL_TX_POWER_TEMPERATURE_MIN || \ + (t) > IL_TX_POWER_TEMPERATURE_MAX) + +/********************* END TEMPERATURE ***************************************/ + +/********************* START TXPOWER *****************************************/ + +/** + * 4965 txpower calculations rely on information from three sources: + * + * 1) EEPROM + * 2) "initialize" alive notification + * 3) stats notifications + * + * EEPROM data consists of: + * + * 1) Regulatory information (max txpower and channel usage flags) is provided + * separately for each channel that can possibly supported by 4965. + * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz + * (legacy) channels. + * + * See struct il4965_eeprom_channel for format, and struct il4965_eeprom + * for locations in EEPROM. + * + * 2) Factory txpower calibration information is provided separately for + * sub-bands of contiguous channels. 2.4GHz has just one sub-band, + * but 5 GHz has several sub-bands. + * + * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided. + * + * See struct il4965_eeprom_calib_info (and the tree of structures + * contained within it) for format, and struct il4965_eeprom for + * locations in EEPROM. + * + * "Initialization alive" notification (see struct il4965_init_alive_resp) + * consists of: + * + * 1) Temperature calculation parameters. + * + * 2) Power supply voltage measurement. + * + * 3) Tx gain compensation to balance 2 transmitters for MIMO use. + * + * Statistics notifications deliver: + * + * 1) Current values for temperature param R4. + */ + +/** + * To calculate a txpower setting for a given desired target txpower, channel, + * modulation bit rate, and transmitter chain (4965 has 2 transmitters to + * support MIMO and transmit diversity), driver must do the following: + * + * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel. + * Do not exceed regulatory limit; reduce target txpower if necessary. + * + * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31), + * 2 transmitters will be used simultaneously; driver must reduce the + * regulatory limit by 3 dB (half-power) for each transmitter, so the + * combined total output of the 2 transmitters is within regulatory limits. + * + * + * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by + * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]); + * reduce target txpower if necessary. + * + * Backoff values below are in 1/2 dB units (equivalent to steps in + * txpower gain tables): + * + * OFDM 6 - 36 MBit: 10 steps (5 dB) + * OFDM 48 MBit: 15 steps (7.5 dB) + * OFDM 54 MBit: 17 steps (8.5 dB) + * OFDM 60 MBit: 20 steps (10 dB) + * CCK all rates: 10 steps (5 dB) + * + * Backoff values apply to saturation txpower on a per-transmitter basis; + * when using MIMO (2 transmitters), each transmitter uses the same + * saturation level provided in EEPROM, and the same backoff values; + * no reduction (such as with regulatory txpower limits) is required. + * + * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel + * widths and 40 Mhz (.11n HT40) channel widths; there is no separate + * factory measurement for ht40 channels. 
+ * + * The result of this step is the final target txpower. The rest of + * the steps figure out the proper settings for the device to achieve + * that target txpower. + * + * + * 3) Determine (EEPROM) calibration sub band for the target channel, by + * comparing against first and last channels in each sub band + * (see struct il4965_eeprom_calib_subband_info). + * + * + * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, + * referencing the 2 factory-measured (sample) channels within the sub band. + * + * Interpolation is based on difference between target channel's frequency + * and the sample channels' frequencies. Since channel numbers are based + * on frequency (5 MHz between each channel number), this is equivalent + * to interpolating based on channel number differences. + * + * Note that the sample channels may or may not be the channels at the + * edges of the sub band. The target channel may be "outside" of the + * span of the sampled channels. + * + * Driver may choose the pair (for 2 Tx chains) of measurements (see + * struct il4965_eeprom_calib_ch_info) for which the actual measured + * txpower comes closest to the desired txpower. Usually, though, + * the middle set of measurements is closest to the regulatory limits, + * and is therefore a good choice for all txpower calculations (this + * assumes that high accuracy is needed for maximizing legal txpower, + * while lower txpower configurations do not need as much accuracy). + * + * Driver should interpolate both members of the chosen measurement pair, + * i.e. for both Tx chains (radio transmitters), unless the driver knows + * that only one of the chains will be used (e.g. only one tx antenna + * connected, but this should be unusual). The rate scaling algorithm + * switches antennas to find best performance, so both Tx chains will + * be used (although only one at a time) even for non-MIMO transmissions. + * + * Driver should interpolate factory values for temperature, gain table + * idx, and actual power. The power amplifier detector values are + * not used by the driver. + * + * Sanity check: If the target channel happens to be one of the sample + * channels, the results should agree with the sample channel's + * measurements! + * + * + * 5) Find difference between desired txpower and (interpolated) + * factory-measured txpower. Using (interpolated) factory gain table idx + * (shown elsewhere) as a starting point, adjust this idx lower to + * increase txpower, or higher to decrease txpower, until the target + * txpower is reached. Each step in the gain table is 1/2 dB. + * + * For example, if factory measured txpower is 16 dBm, and target txpower + * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower + * by 3 dB. + * + * + * 6) Find difference between current device temperature and (interpolated) + * factory-measured temperature for sub-band. Factory values are in + * degrees Celsius. To calculate current temperature, see comments for + * "4965 temperature calculation". + * + * If current temperature is higher than factory temperature, driver must + * increase gain (lower gain table idx), and vice versa. + * + * Temperature affects gain differently for different channels: + * + * 2.4 GHz all channels: 3.5 degrees per half-dB step + * 5 GHz channels 34-43: 4.5 degrees per half-dB step + * 5 GHz channels >= 44: 4.0 degrees per half-dB step + * + * NOTE: Temperature can increase rapidly when transmitting, especially + * with heavy traffic at high txpowers.
Driver should update + * temperature calculations often under these conditions to + * maintain strong txpower in the face of rising temperature. + * + * + * 7) Find difference between current power supply voltage indicator + * (from "initialize alive") and factory-measured power supply voltage + * indicator (EEPROM). + * + * If the current voltage is higher (indicator is lower) than factory + * voltage, gain should be reduced (gain table idx increased) by: + * + * (eeprom - current) / 7 + * + * If the current voltage is lower (indicator is higher) than factory + * voltage, gain should be increased (gain table idx decreased) by: + * + * 2 * (current - eeprom) / 7 + * + * If number of idx steps in either direction turns out to be > 2, + * something is wrong ... just use 0. + * + * NOTE: Voltage compensation is independent of band/channel. + * + * NOTE: "Initialize" uCode measures current voltage, which is assumed + * to be constant after this initial measurement. Voltage + * compensation for txpower (number of steps in gain table) + * may be calculated once and used until the next uCode bootload. + * + * + * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31), + * adjust txpower for each transmitter chain, so txpower is balanced + * between the two chains. There are 5 pairs of tx_atten[group][chain] + * values in "initialize alive", one pair for each of 5 channel ranges: + * + * Group 0: 5 GHz channel 34-43 + * Group 1: 5 GHz channel 44-70 + * Group 2: 5 GHz channel 71-124 + * Group 3: 5 GHz channel 125-200 + * Group 4: 2.4 GHz all channels + * + * Add the tx_atten[group][chain] value to the idx for the target chain. + * The values are signed, but are in pairs of 0 and a non-negative number, + * so as to reduce gain (if necessary) of the "hotter" channel. This + * avoids any need to double-check for regulatory compliance after + * this step. + * + * + * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation + * value to the idx: + * + * Hardware rev B: 9 steps (4.5 dB) + * Hardware rev C: 5 steps (2.5 dB) + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. + * + * NOTE: This compensation is in addition to any saturation backoff that + * might have been applied in an earlier step. + * + * + * 10) Select the gain table, based on band (2.4 vs 5 GHz). + * + * Limit the adjusted idx to stay within the table! + * + * + * 11) Read gain table entries for DSP and radio gain, place into appropriate + * location(s) in command (struct il4965_txpowertable_cmd). + */ + +/** + * When MIMO is used (2 transmitters operating simultaneously), driver should + * limit each transmitter to deliver a max of 3 dB below the regulatory limit + * for the device. That is, use half power for each transmitter, so total + * txpower is within regulatory limits. + * + * The value "6" represents number of steps in gain table to reduce power 3 dB. + * Each step is 1/2 dB. + */ +#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) + +/** + * CCK gain compensation. + * + * When calculating txpowers for CCK, after making sure that the target power + * is within regulatory and saturation limits, driver must additionally + * back off gain by adding these values to the gain table idx. + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. 
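A minimal sketch of that selection, using the two step counts defined just below; the helper name and the bit extraction are only assumptions spelled out from the comment above, not code taken from the driver:

/* Illustrative only: choose the CCK back-off (in 1/2-dB gain-table steps)
 * from hardware rev bits [3:2] of CSR_HW_REV_WA_REG: 1 = rev B, 2 = rev C. */
static int cck_compensation_steps(unsigned int hw_rev_wa_reg)
{
	unsigned int rev = (hw_rev_wa_reg >> 2) & 0x3;

	return (rev == 2) ? 5 /* rev C, 2.5 dB */ : 9 /* rev B, 4.5 dB */;
}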
+ */ +#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9) +#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5) + +/* + * 4965 power supply voltage compensation for txpower + */ +#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7) + +/** + * Gain tables. + * + * The following tables contain pairs of values for setting txpower, i.e. + * gain settings for the output of the device's digital signal processor (DSP), + * and for the analog gain structure of the transmitter. + * + * Each entry in the gain tables represents a step of 1/2 dB. Note that these + * are *relative* steps, not indications of absolute output power. Output + * power varies with temperature, voltage, and channel frequency, and also + * requires consideration of average power (to satisfy regulatory constraints), + * and peak power (to avoid distortion of the output signal). + * + * Each entry contains two values: + * 1) DSP gain (sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. + * + * EEPROM contains factory calibration data for txpower. This maps actual + * measured txpower levels to gain settings in the "well known" tables + * below ("well-known" means here that both factory calibration *and* the + * driver work with the same table). + * + * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table + * has an extension (into negative idxes), in case the driver needs to + * boost power setting for high device temperatures (higher than would be + * present during factory calibration). A 5 GHz EEPROM idx of "40" + * corresponds to the 49th entry in the table used by the driver.
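The offset between a gain-table idx and a position in the extended 5 GHz table follows directly from that -9 extension; a small sketch of the mapping (the helper name is illustrative, and the constant is the MIN_TX_GAIN_IDX_52GHZ_EXT value defined just below):

/* Illustrative only: convert a 5 GHz gain-table idx (which may go as low
 * as -9 in the extended table) into a 0-based array position; an EEPROM
 * idx of 40 lands at position 49, as the comment above puts it. */
static int il5ghz_table_pos(int gain_idx)
{
	return gain_idx - (-9);		/* gain_idx - MIN_TX_GAIN_IDX_52GHZ_EXT */
}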
+ */ +#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */ +#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */ + +/** + * 2.4 GHz gain table + * + * Index Dsp gain Radio gain + * 0 110 0x3f (highest gain) + * 1 104 0x3f + * 2 98 0x3f + * 3 110 0x3e + * 4 104 0x3e + * 5 98 0x3e + * 6 110 0x3d + * 7 104 0x3d + * 8 98 0x3d + * 9 110 0x3c + * 10 104 0x3c + * 11 98 0x3c + * 12 110 0x3b + * 13 104 0x3b + * 14 98 0x3b + * 15 110 0x3a + * 16 104 0x3a + * 17 98 0x3a + * 18 110 0x39 + * 19 104 0x39 + * 20 98 0x39 + * 21 110 0x38 + * 22 104 0x38 + * 23 98 0x38 + * 24 110 0x37 + * 25 104 0x37 + * 26 98 0x37 + * 27 110 0x36 + * 28 104 0x36 + * 29 98 0x36 + * 30 110 0x35 + * 31 104 0x35 + * 32 98 0x35 + * 33 110 0x34 + * 34 104 0x34 + * 35 98 0x34 + * 36 110 0x33 + * 37 104 0x33 + * 38 98 0x33 + * 39 110 0x32 + * 40 104 0x32 + * 41 98 0x32 + * 42 110 0x31 + * 43 104 0x31 + * 44 98 0x31 + * 45 110 0x30 + * 46 104 0x30 + * 47 98 0x30 + * 48 110 0x6 + * 49 104 0x6 + * 50 98 0x6 + * 51 110 0x5 + * 52 104 0x5 + * 53 98 0x5 + * 54 110 0x4 + * 55 104 0x4 + * 56 98 0x4 + * 57 110 0x3 + * 58 104 0x3 + * 59 98 0x3 + * 60 110 0x2 + * 61 104 0x2 + * 62 98 0x2 + * 63 110 0x1 + * 64 104 0x1 + * 65 98 0x1 + * 66 110 0x0 + * 67 104 0x0 + * 68 98 0x0 + * 69 97 0 + * 70 96 0 + * 71 95 0 + * 72 94 0 + * 73 93 0 + * 74 92 0 + * 75 91 0 + * 76 90 0 + * 77 89 0 + * 78 88 0 + * 79 87 0 + * 80 86 0 + * 81 85 0 + * 82 84 0 + * 83 83 0 + * 84 82 0 + * 85 81 0 + * 86 80 0 + * 87 79 0 + * 88 78 0 + * 89 77 0 + * 90 76 0 + * 91 75 0 + * 92 74 0 + * 93 73 0 + * 94 72 0 + * 95 71 0 + * 96 70 0 + * 97 69 0 + * 98 68 0 + */ + +/** + * 5 GHz gain table + * + * Index Dsp gain Radio gain + * -9 123 0x3F (highest gain) + * -8 117 0x3F + * -7 110 0x3F + * -6 104 0x3F + * -5 98 0x3F + * -4 110 0x3E + * -3 104 0x3E + * -2 98 0x3E + * -1 110 0x3D + * 0 104 0x3D + * 1 98 0x3D + * 2 110 0x3C + * 3 104 0x3C + * 4 98 0x3C + * 5 110 0x3B + * 6 104 0x3B + * 7 98 0x3B + * 8 110 0x3A + * 9 104 0x3A + * 10 98 0x3A + * 11 110 0x39 + * 12 104 0x39 + * 13 98 0x39 + * 14 110 0x38 + * 15 104 0x38 + * 16 98 0x38 + * 17 110 0x37 + * 18 104 0x37 + * 19 98 0x37 + * 20 110 0x36 + * 21 104 0x36 + * 22 98 0x36 + * 23 110 0x35 + * 24 104 0x35 + * 25 98 0x35 + * 26 110 0x34 + * 27 104 0x34 + * 28 98 0x34 + * 29 110 0x33 + * 30 104 0x33 + * 31 98 0x33 + * 32 110 0x32 + * 33 104 0x32 + * 34 98 0x32 + * 35 110 0x31 + * 36 104 0x31 + * 37 98 0x31 + * 38 110 0x30 + * 39 104 0x30 + * 40 98 0x30 + * 41 110 0x25 + * 42 104 0x25 + * 43 98 0x25 + * 44 110 0x24 + * 45 104 0x24 + * 46 98 0x24 + * 47 110 0x23 + * 48 104 0x23 + * 49 98 0x23 + * 50 110 0x22 + * 51 104 0x18 + * 52 98 0x18 + * 53 110 0x17 + * 54 104 0x17 + * 55 98 0x17 + * 56 110 0x16 + * 57 104 0x16 + * 58 98 0x16 + * 59 110 0x15 + * 60 104 0x15 + * 61 98 0x15 + * 62 110 0x14 + * 63 104 0x14 + * 64 98 0x14 + * 65 110 0x13 + * 66 104 0x13 + * 67 98 0x13 + * 68 110 0x12 + * 69 104 0x08 + * 70 98 0x08 + * 71 110 0x07 + * 72 104 0x07 + * 73 98 0x07 + * 74 110 0x06 + * 75 104 0x06 + * 76 98 0x06 + * 77 110 0x05 + * 78 104 0x05 + * 79 98 0x05 + * 80 110 0x04 + * 81 104 0x04 + * 82 98 0x04 + * 83 110 0x03 + * 84 104 0x03 + * 85 98 0x03 + * 86 110 0x02 + * 87 104 0x02 + * 88 98 0x02 + * 89 110 0x01 + * 90 104 0x01 + * 91 98 0x01 + * 92 110 0x00 + * 93 104 0x00 + * 94 98 0x00 + * 95 93 0x00 + * 96 88 0x00 + * 97 83 0x00 + * 98 78 0x00 + */ + +/** + * Sanity checks and default values for EEPROM regulatory levels. + * If EEPROM values fall outside MIN/MAX range, use default values. 
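A minimal sketch of that fallback, using the regulatory MIN/MAX and default values defined a little further down (half-dBm units); the helper name is illustrative:

/* Illustrative only: replace an out-of-range EEPROM regulatory limit
 * with the default value, as the comment above describes. */
static int sane_regulatory_limit(int eeprom_val)
{
	if (eeprom_val < 0 /* IL_TX_POWER_REGULATORY_MIN */ ||
	    eeprom_val > 34 /* IL_TX_POWER_REGULATORY_MAX */)
		return 34;	/* IL_TX_POWER_DEFAULT_REGULATORY_24/_52 */

	return eeprom_val;
}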
+ * + * Regulatory limits refer to the maximum average txpower allowed by + * regulatory agencies in the geographies in which the device is meant + * to be operated. These limits are SKU-specific (i.e. geography-specific), + * and channel-specific; each channel has an individual regulatory limit + * listed in the EEPROM. + * + * Units are in half-dBm (i.e. "34" means 17 dBm). + */ +#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34) +#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34) +#define IL_TX_POWER_REGULATORY_MIN (0) +#define IL_TX_POWER_REGULATORY_MAX (34) + +/** + * Sanity checks and default values for EEPROM saturation levels. + * If EEPROM values fall outside MIN/MAX range, use default values. + * + * Saturation is the highest level that the output power amplifier can produce + * without significant clipping distortion. This is a "peak" power level. + * Different types of modulation (i.e. various "rates", and OFDM vs. CCK) + * require differing amounts of backoff, relative to their average power output, + * in order to avoid clipping distortion. + * + * Driver must make sure that it is violating neither the saturation limit, + * nor the regulatory limit, when calculating Tx power settings for various + * rates. + * + * Units are in half-dBm (i.e. "38" means 19 dBm). + */ +#define IL_TX_POWER_DEFAULT_SATURATION_24 (38) +#define IL_TX_POWER_DEFAULT_SATURATION_52 (38) +#define IL_TX_POWER_SATURATION_MIN (20) +#define IL_TX_POWER_SATURATION_MAX (50) + +/** + * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance) + * and thermal Txpower calibration. + * + * When calculating txpower, driver must compensate for current device + * temperature; higher temperature requires higher gain. Driver must calculate + * current temperature (see "4965 temperature calculation"), then compare vs. + * factory calibration temperature in EEPROM; if current temperature is higher + * than factory temperature, driver must *increase* gain by proportions shown + * in table below. If current temperature is lower than factory, driver must + * *decrease* gain. + * + * Different frequency ranges require different compensation, as shown below. + */ +/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */ +#define CALIB_IL_TX_ATTEN_GR1_FCH 34 +#define CALIB_IL_TX_ATTEN_GR1_LCH 43 + +/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */ +#define CALIB_IL_TX_ATTEN_GR2_FCH 44 +#define CALIB_IL_TX_ATTEN_GR2_LCH 70 + +/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */ +#define CALIB_IL_TX_ATTEN_GR3_FCH 71 +#define CALIB_IL_TX_ATTEN_GR3_LCH 124 + +/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */ +#define CALIB_IL_TX_ATTEN_GR4_FCH 125 +#define CALIB_IL_TX_ATTEN_GR4_LCH 200 + +/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */ +#define CALIB_IL_TX_ATTEN_GR5_FCH 1 +#define CALIB_IL_TX_ATTEN_GR5_LCH 20 + +enum { + CALIB_CH_GROUP_1 = 0, + CALIB_CH_GROUP_2 = 1, + CALIB_CH_GROUP_3 = 2, + CALIB_CH_GROUP_4 = 3, + CALIB_CH_GROUP_5 = 4, + CALIB_CH_GROUP_MAX +}; + +/********************* END TXPOWER *****************************************/ + +/** + * Tx/Rx Queues + * + * Most communication between driver and 4965 is via queues of data buffers. + * For example, all commands that the driver issues to device's embedded + * controller (uCode) are via the command queue (one of the Tx queues). All + * uCode command responses/replies/notifications, including Rx frames, are + * conveyed from uCode to driver via the Rx queue. 
+ * + * Most support for these queues, including handshake support, resides in + * structures in host DRAM, shared between the driver and the device. When + * allocating this memory, the driver must make sure that data written by + * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's + * cache memory), so DRAM and cache are consistent, and the device can + * immediately see changes made by the driver. + * + * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via + * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array + * in DRAM containing 256 Transmit Frame Descriptors (TFDs). + */ +#define IL49_NUM_FIFOS 7 +#define IL49_CMD_FIFO_NUM 4 +#define IL49_NUM_QUEUES 16 +#define IL49_NUM_AMPDU_QUEUES 8 + +/** + * struct il4965_schedq_bc_tbl + * + * Byte Count table + * + * Each Tx queue uses a byte-count table containing 320 entries: + * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that + * duplicate the first 64 entries (to avoid wrap-around within a Tx win; + * max Tx win is 64 TFDs). + * + * When driver sets up a new TFD, it must also enter the total byte count + * of the frame to be transmitted into the corresponding entry in the byte + * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver + * must duplicate the byte count entry in corresponding idx 256-319. + * + * padding puts each byte count table on a 1024-byte boundary; + * 4965 assumes tables are separated by 1024 bytes. + */ +struct il4965_scd_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; + u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; +} __packed; + +#define IL4965_RTC_INST_LOWER_BOUND (0x000000) + +/* RSSI to dBm */ +#define IL4965_RSSI_OFFSET 44 + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +/* PCI register values */ +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +#define IL4965_DEFAULT_TX_RETRY 15 + +/* EEPROM */ +#define IL4965_FIRST_AMPDU_QUEUE 10 + +/* Calibration */ +void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp); +void il4965_sensitivity_calibration(struct il_priv *il, void *resp); +void il4965_init_sensitivity(struct il_priv *il); +void il4965_reset_run_time_calib(struct il_priv *il); +void il4965_calib_free_results(struct il_priv *il); + +/* Debug */ +#ifdef CONFIG_IWLEGACY_DEBUGFS +ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t il4965_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos); +#endif + +/****************************/ +/* Flow Handler Definitions */ +/****************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH49_MEM_LOWER_BOUND (0x1000) +#define FH49_MEM_UPPER_BOUND (0x2000) + +/** + * Keep-Warm (KW) buffer base address. + * + * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the + * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency + * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host + * from going into a power-savings mode that would cause higher DRAM latency, + * and possible data over/under-runs, before all Tx/Rx is complete. 
+ * + * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4) + * of the buffer, which must be 4K aligned. Once this is set up, the 4965 + * automatically invokes keep-warm accesses when normal accesses might not + * be sufficient to maintain fast DRAM response. + * + * Bit fields: + * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned + */ +#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C) + +/** + * TFD Circular Buffers Base (CBBC) addresses + * + * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident + * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) + * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte + * aligned (address bits 0-7 must be 0). + * + * Bit fields in each pointer register: + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned + */ +#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0) +#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10) + +/* Find TFD CB base pointer for given queue (range 0-15). */ +#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + +/** + * Rx SRAM Control and Status Registers (RSCSR) + * + * These registers provide handshake between driver and 4965 for the Rx queue + * (this queue handles *all* command responses, notifications, Rx data, etc. + * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx + * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can + * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer + * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 + * mapping between RBDs and RBs. + * + * Driver must allocate host DRAM memory for the following, and set the + * physical address of each into 4965 registers: + * + * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 + * entries (although any power of 2, up to 4096, is selectable by driver). + * Each entry (1 dword) points to a receive buffer (RB) of consistent size + * (typically 4K, although 8K or 16K are also selectable by driver). + * Driver sets up RB size and number of RBDs in the CB via Rx config + * register FH49_MEM_RCSR_CHNL0_CONFIG_REG. + * + * Bit fields within one RBD: + * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned + * + * Driver sets physical address [35:8] of base of RBD circular buffer + * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. + * + * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers + * (RBs) have been filled, via a "write pointer", actually the idx of + * the RB's corresponding RBD within the circular buffer. Driver sets + * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. + * + * Bit fields in lower dword of Rx status buffer (upper dword not used + * by driver; see struct il4965_shared, val0): + * 31-12: Not used by driver + * 11- 0: Index of last filled Rx buffer descriptor + * (4965 writes, driver reads this value) + * + * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must + * enter pointers to these RBs into contiguous RBD circular buffer entries, + * and update the 4965's "write" idx register, + * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG. + * + * This "write" idx corresponds to the *next* RBD that the driver will make + * available, i.e. one RBD past the tail of the ready-to-fill RBDs within + * the circular buffer. 
This value should initially be 0 (before preparing any + * RBs), should be 8 after preparing the first 8 RBs (for example), and must + * wrap back to 0 at the end of the circular buffer (but don't wrap before + * "read" idx has advanced past 1! See below). + * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8. + * + * As the 4965 fills RBs (referenced from contiguous RBDs within the circular + * buffer), it updates the Rx status buffer in host DRAM, 2) described above, + * to tell the driver the idx of the latest filled RBD. The driver must + * read this "read" idx from DRAM after receiving an Rx interrupt from 4965. + * + * The driver must also internally keep track of a third idx, which is the + * next RBD to process. When receiving an Rx interrupt, driver should process + * all filled but unprocessed RBs up to, but not including, the RB + * corresponding to the "read" idx. For example, if "read" idx becomes "1", + * driver may process the RB pointed to by RBD 0. Depending on volume of + * traffic, there may be many RBs to process. + * + * If read idx == write idx, 4965 thinks there is no room to put new data. + * Due to this, the maximum number of filled RBs is 255, instead of 256. To + * be safe, make sure that there is a gap of at least 2 RBDs between "write" + * and "read" idxes; that is, make sure that there are no more than 254 + * buffers waiting to be filled. + */ +#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0) +#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00) +#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND) + +/** + * Physical base address of 8-byte Rx Status buffer. + * Bit fields: + * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned. + */ +#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0) + +/** + * Physical base address of Rx Buffer Descriptor Circular Buffer. + * Bit fields: + * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned. + */ +#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004) + +/** + * Rx write pointer (idx, really!). + * Bit fields: + * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. + * NOTE: For 256-entry circular buffer, use only bits [7:0]. + */ +#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008) +#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG) + +/** + * Rx Config/Status Registers (RCSR) + * Rx Config Reg for channel 0 (only channel used) + * + * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for + * normal operation (see bit fields). + * + * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. + * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for + * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. + * + * Bit fields: + * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29-24: reserved + * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), + * min "5" for 32 RBDs, max "12" for 4096 RBDs. + * 19-18: reserved + * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, + * '10' 12K, '11' 16K.
+ * 15-14: reserved + * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) + * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) + * typical value 0x10 (about 1/2 msec) + * 3- 0: reserved + */ +#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00) +#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0) +#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND) + +#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0) + +#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ +#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ +#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ +#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ +#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ +#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */ + +#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) +#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) +#define RX_RB_TIMEOUT (0x10) + +#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) +#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) +#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) +#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) + +#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) +#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +/** + * Rx Shared Status Registers (RSSR) + * + * After stopping Rx DMA channel (writing 0 to + * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll + * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. + * + * Bit fields: + * 24: 1 = Channel 0 is idle + * + * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV + * contain default values that should not be altered by the driver. + */ +#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40) +#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00) + +#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND) +#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ + (FH49_MEM_RSSR_LOWER_BOUND + 0x008) + +#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 + +/* TFDB Area - TFDs buffer table */ +#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) +#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900) +#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958) +#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) +#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) + +/** + * Transmit DMA Channel Control/Status Registers (TCSR) + * + * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels + * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, + * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. + * + * To use a Tx DMA channel, driver must initialize its + * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with: + * + * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL + * + * All other bits should be 0. 
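Put together with the offsets and values defined further down, that initialization amounts to programming a single register per channel. A minimal sketch (the helper names are illustrative; the actual MMIO write is driver-specific and omitted):

#include <stdint.h>

/* Illustrative only: offset (from the device's PCI base) of
 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl), and the "enable | credit enable"
 * value the comment above says to program into it. */
static uint32_t tcsr_tx_config_offset(int chnl)
{
	return 0x1000 /* FH49_MEM_LOWER_BOUND */ + 0xD00 + 0x20 * chnl;
}

static uint32_t tcsr_tx_config_enable_val(void)
{
	return 0x80000000	/* ..._DMA_CHNL_ENABLE   */
	     | 0x00000008;	/* ..._DMA_CREDIT_ENABLE */
}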
+ * + * Bit fields: + * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29- 4: Reserved, set to "0" + * 3: Enable internal DMA requests (1, normal operation), disable (0) + * 2- 0: Reserved, set to "0" + */ +#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00) +#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60) + +/* Find Control/Status reg for given Tx DMA/FIFO channel */ +#define FH49_TCSR_CHNL_NUM (7) +#define FH50_TCSR_CHNL_NUM (8) + +/* TCSR: tx_config register values */ +#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl)) +#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ + (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) +#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ + (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) + +#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) + +#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) + +#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/** + * Tx Shared Status Registers (TSSR) + * + * After stopping Tx DMA channel (writing 0 to + * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll + * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle + * (channel's buffers empty | no pending requests). + * + * Bit fields: + * 31-24: 1 = Channel buffers empty (channel 7:0) + * 23-16: 1 = No pending requests (channel 7:0) + */ +#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0) +#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0) + +#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010) + +/** + * Bit fields for TSSR(Tx Shared Status & Control) error status register: + * 31: Indicates an address error when accessed to internal memory + * uCode/driver must write "1" in order to clear this flag + * 30: Indicates that Host did not send the expected number of dwords to FH + * uCode/driver must write "1" in order to clear this flag + * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA + * command was received from the scheduler while the TRB was already full + * with previous command + * uCode/driver must write "1" in order to clear this flag + * 7-0: Each status bit indicates a channel's TxCredit error. When an error + * bit is set, it indicates that the FH has received a full indication + * from the RTC TxFIFO and the current value of the TxCredit counter was + * not equal to zero. 
This means that the credit mechanism was not + * synchronized to the TxFIFO status + * uCode/driver must write "1" in order to clear this flag + */ +#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018) + +#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) + +/* Tx service channels */ +#define FH49_SRVC_CHNL (9) +#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8) +#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0) +#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ + (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) + +#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98) +/* Instruct FH to increment the retry count of a packet when + * it is brought from memory to the TX-FIFO + */ +#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) + +/* Keep Warm Size */ +#define IL_KW_SIZE 0x1000 /* 4k */ + +#endif /* __il_4965_h__ */ diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig index aef65cd4766..05bd375cb84 100644 --- a/drivers/net/wireless/iwlegacy/Kconfig +++ b/drivers/net/wireless/iwlegacy/Kconfig @@ -1,4 +1,4 @@ -config IWLWIFI_LEGACY +config IWLEGACY tristate select FW_LOADER select NEW_LEDS @@ -7,13 +7,13 @@ config IWLWIFI_LEGACY select MAC80211_LEDS menu "Debugging Options" - depends on IWLWIFI_LEGACY + depends on IWLEGACY -config IWLWIFI_LEGACY_DEBUG - bool "Enable full debugging output in 4965 and 3945 drivers" - depends on IWLWIFI_LEGACY +config IWLEGACY_DEBUG + bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers" + depends on IWLEGACY ---help--- - This option will enable debug tracing output for the iwlwifilegacy + This option will enable debug tracing output for the iwlegacy drivers. This will result in the kernel module being ~100k larger. You can @@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG % echo 0x43fff > /sys/class/net/wlan0/device/debug_level You can find the list of debug mask values in: - drivers/net/wireless/iwlwifilegacy/iwl-debug.h + drivers/net/wireless/iwlegacy/common.h If this is your first time using this driver, you should say Y here as the debug information can assist others in helping you resolve any problems you may encounter. -config IWLWIFI_LEGACY_DEBUGFS - bool "4965 and 3945 debugfs support" - depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS +config IWLEGACY_DEBUGFS + bool "iwlegacy (iwl 3945/4965) debugfs support" + depends on IWLEGACY && MAC80211_DEBUGFS ---help--- - Enable creation of debugfs files for the iwlwifilegacy drivers. This + Enable creation of debugfs files for the iwlegacy drivers. This is a low-impact option that allows getting insight into the driver's state at runtime. -config IWLWIFI_LEGACY_DEVICE_TRACING - bool "iwlwifilegacy legacy device access tracing" - depends on IWLWIFI_LEGACY - depends on EVENT_TRACING - help - Say Y here to trace all commands, including TX frames and IO - accesses, sent to the device. If you say yes, iwlwifilegacy will - register with the ftrace framework for event tracing and dump - all this information to the ringbuffer, you may need to - increase the ringbuffer size. See the ftrace documentation - for more information. - - When tracing is not enabled, this option still has some - (though rather small) overhead. - - If unsure, say Y so we can help you better when problems - occur.
endmenu config IWL4965 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" depends on PCI && MAC80211 - select IWLWIFI_LEGACY + select IWLEGACY ---help--- This option enables support for @@ -93,7 +76,7 @@ config IWL4965 config IWL3945 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" depends on PCI && MAC80211 - select IWLWIFI_LEGACY + select IWLEGACY ---help--- Select to build the driver supporting the: diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile index d56aeb38c21..c985a01a073 100644 --- a/drivers/net/wireless/iwlegacy/Makefile +++ b/drivers/net/wireless/iwlegacy/Makefile @@ -1,25 +1,17 @@ -obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o -iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o -iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o -iwl-legacy-objs += iwl-scan.o iwl-led.o -iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o -iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o +obj-$(CONFIG_IWLEGACY) += iwlegacy.o +iwlegacy-objs := common.o +iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o -iwl-legacy-objs += $(iwl-legacy-m) - -CFLAGS_iwl-devtrace.o := -I$(src) +iwlegacy-objs += $(iwlegacy-m) # 4965 obj-$(CONFIG_IWL4965) += iwl4965.o -iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o -iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o -iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o -iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o -iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o +iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o +iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o # 3945 obj-$(CONFIG_IWL3945) += iwl3945.o -iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o -iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o +iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o +iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h index 89904054473..25dd7d28d02 100644 --- a/drivers/net/wireless/iwlegacy/iwl-commands.h +++ b/drivers/net/wireless/iwlegacy/commands.h @@ -60,100 +60,96 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -/* - * Please use this file (iwl-commands.h) only for uCode API definitions. - * Please use iwl-xxxx-hw.h for hardware-related definitions. - * Please use iwl-dev.h for driver implementation definitions. 
- */ -#ifndef __iwl_legacy_commands_h__ -#define __iwl_legacy_commands_h__ +#ifndef __il_commands_h__ +#define __il_commands_h__ -struct iwl_priv; +#include <linux/ieee80211.h> -/* uCode version contains 4 values: Major/Minor/API/Serial */ -#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) -#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) -#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) -#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) +struct il_priv; +/* uCode version contains 4 values: Major/Minor/API/Serial */ +#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) /* Tx rates */ -#define IWL_CCK_RATES 4 -#define IWL_OFDM_RATES 8 -#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) +#define IL_CCK_RATES 4 +#define IL_OFDM_RATES 8 +#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES) enum { - REPLY_ALIVE = 0x1, - REPLY_ERROR = 0x2, + N_ALIVE = 0x1, + N_ERROR = 0x2, /* RXON and QOS commands */ - REPLY_RXON = 0x10, - REPLY_RXON_ASSOC = 0x11, - REPLY_QOS_PARAM = 0x13, - REPLY_RXON_TIMING = 0x14, + C_RXON = 0x10, + C_RXON_ASSOC = 0x11, + C_QOS_PARAM = 0x13, + C_RXON_TIMING = 0x14, /* Multi-Station support */ - REPLY_ADD_STA = 0x18, - REPLY_REMOVE_STA = 0x19, + C_ADD_STA = 0x18, + C_REM_STA = 0x19, /* Security */ - REPLY_WEPKEY = 0x20, + C_WEPKEY = 0x20, /* RX, TX, LEDs */ - REPLY_3945_RX = 0x1b, /* 3945 only */ - REPLY_TX = 0x1c, - REPLY_RATE_SCALE = 0x47, /* 3945 only */ - REPLY_LEDS_CMD = 0x48, - REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ + N_3945_RX = 0x1b, /* 3945 only */ + C_TX = 0x1c, + C_RATE_SCALE = 0x47, /* 3945 only */ + C_LEDS = 0x48, + C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */ /* 802.11h related */ - REPLY_CHANNEL_SWITCH = 0x72, - CHANNEL_SWITCH_NOTIFICATION = 0x73, - REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, - SPECTRUM_MEASURE_NOTIFICATION = 0x75, + C_CHANNEL_SWITCH = 0x72, + N_CHANNEL_SWITCH = 0x73, + C_SPECTRUM_MEASUREMENT = 0x74, + N_SPECTRUM_MEASUREMENT = 0x75, /* Power Management */ - POWER_TABLE_CMD = 0x77, - PM_SLEEP_NOTIFICATION = 0x7A, - PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, + C_POWER_TBL = 0x77, + N_PM_SLEEP = 0x7A, + N_PM_DEBUG_STATS = 0x7B, /* Scan commands and notifications */ - REPLY_SCAN_CMD = 0x80, - REPLY_SCAN_ABORT_CMD = 0x81, - SCAN_START_NOTIFICATION = 0x82, - SCAN_RESULTS_NOTIFICATION = 0x83, - SCAN_COMPLETE_NOTIFICATION = 0x84, + C_SCAN = 0x80, + C_SCAN_ABORT = 0x81, + N_SCAN_START = 0x82, + N_SCAN_RESULTS = 0x83, + N_SCAN_COMPLETE = 0x84, /* IBSS/AP commands */ - BEACON_NOTIFICATION = 0x90, - REPLY_TX_BEACON = 0x91, + N_BEACON = 0x90, + C_TX_BEACON = 0x91, /* Miscellaneous commands */ - REPLY_TX_PWR_TABLE_CMD = 0x97, + C_TX_PWR_TBL = 0x97, /* Bluetooth device coexistence config command */ - REPLY_BT_CONFIG = 0x9b, + C_BT_CONFIG = 0x9b, /* Statistics */ - REPLY_STATISTICS_CMD = 0x9c, - STATISTICS_NOTIFICATION = 0x9d, + C_STATS = 0x9c, + N_STATS = 0x9d, /* RF-KILL commands and notifications */ - CARD_STATE_NOTIFICATION = 0xa1, + N_CARD_STATE = 0xa1, /* Missed beacons notification */ - MISSED_BEACONS_NOTIFICATION = 0xa2, + N_MISSED_BEACONS = 0xa2, - REPLY_CT_KILL_CONFIG_CMD = 0xa4, - SENSITIVITY_CMD = 0xa8, - REPLY_PHY_CALIBRATION_CMD = 0xb0, - REPLY_RX_PHY_CMD = 0xc0, - REPLY_RX_MPDU_CMD = 0xc1, - REPLY_RX = 0xc3, - REPLY_COMPRESSED_BA = 0xc5, + C_CT_KILL_CONFIG = 0xa4, + C_SENSITIVITY = 0xa8, + C_PHY_CALIBRATION = 0xb0, + N_RX_PHY = 0xc0, + N_RX_MPDU = 
0xc1, + N_RX = 0xc3, + N_COMPRESSED_BA = 0xc5, - REPLY_MAX = 0xff + IL_CN_MAX = 0xff }; /****************************************************************************** @@ -163,25 +159,25 @@ enum { * *****************************************************************************/ -/* iwl_cmd_header flags value */ -#define IWL_CMD_FAILED_MSK 0x40 +/* il_cmd_header flags value */ +#define IL_CMD_FAILED_MSK 0x40 #define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) -#define SEQ_TO_INDEX(s) ((s) & 0xff) -#define INDEX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_TO_IDX(s) ((s) & 0xff) +#define IDX_TO_SEQ(i) ((i) & 0xff) #define SEQ_HUGE_FRAME cpu_to_le16(0x4000) #define SEQ_RX_FRAME cpu_to_le16(0x8000) /** - * struct iwl_cmd_header + * struct il_cmd_header * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. */ -struct iwl_cmd_header { - u8 cmd; /* Command ID: REPLY_RXON, etc. */ - u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ +struct il_cmd_header { + u8 cmd; /* Command ID: C_RXON, etc. */ + u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ /* * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver @@ -192,29 +188,28 @@ struct iwl_cmd_header { * There is one exception: uCode sets bit 15 when it originates * the response/notification, i.e. when the response/notification * is not a direct response to a command sent by the driver. For - * example, uCode issues REPLY_3945_RX when it sends a received frame + * example, uCode issues N_3945_RX when it sends a received frame * to the driver; it is not a direct response to any driver command. * * The Linux driver uses the following format: * - * 0:7 tfd index - position within TX queue - * 8:12 TX queue id - * 13 reserved - * 14 huge - driver sets this to indicate command is in the - * 'huge' storage at the end of the command buffers - * 15 unsolicited RX or uCode-originated notification - */ + * 0:7 tfd idx - position within TX queue + * 8:12 TX queue id + * 13 reserved + * 14 huge - driver sets this to indicate command is in the + * 'huge' storage at the end of the command buffers + * 15 unsolicited RX or uCode-originated notification + */ __le16 sequence; /* command or response/notification data follows immediately */ u8 data[0]; } __packed; - /** - * struct iwl3945_tx_power + * struct il3945_tx_power * - * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH + * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH * * Each entry contains two values: * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained @@ -223,21 +218,21 @@ struct iwl_cmd_header { * 2) Radio gain. This sets the analog gain of the radio Tx path. * It is a coarser setting, and behaves in a logarithmic (dB) fashion. * - * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. + * Driver obtains values from struct il3945_tx_power power_gain_table[][]. 
*/ -struct iwl3945_tx_power { +struct il3945_tx_power { u8 tx_gain; /* gain for analog radio */ u8 dsp_atten; /* gain for DSP */ } __packed; /** - * struct iwl3945_power_per_rate + * struct il3945_power_per_rate * - * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH */ -struct iwl3945_power_per_rate { +struct il3945_power_per_rate { u8 rate; /* plcp */ - struct iwl3945_tx_power tpc; + struct il3945_tx_power tpc; u8 reserved; } __packed; @@ -245,10 +240,10 @@ struct iwl3945_power_per_rate { * iwl4965 rate_n_flags bit fields * * rate_n_flags format is used in following iwl4965 commands: - * REPLY_RX (response only) - * REPLY_RX_MPDU (response only) - * REPLY_TX (both command and response) - * REPLY_TX_LINK_QUALITY_CMD + * N_RX (response only) + * N_RX_MPDU (response only) + * C_TX (both command and response) + * C_TX_LINK_QUALITY_CMD * * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): * 2-0: 0) 6 Mbps @@ -326,17 +321,17 @@ struct iwl3945_power_per_rate { #define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) #define RATE_ANT_NUM 3 -#define POWER_TABLE_NUM_ENTRIES 33 -#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 -#define POWER_TABLE_CCK_ENTRY 32 +#define POWER_TBL_NUM_ENTRIES 33 +#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TBL_CCK_ENTRY 32 -#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 -#define IWL_PWR_CCK_ENTRIES 2 +#define IL_PWR_NUM_HT_OFDM_ENTRIES 24 +#define IL_PWR_CCK_ENTRIES 2 /** - * union iwl4965_tx_power_dual_stream + * union il4965_tx_power_dual_stream * - * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH * Use __le32 version (struct tx_power_dual_stream) when building command. * * Driver provides radio gain and DSP attenuation settings to device in pairs, @@ -347,9 +342,9 @@ struct iwl3945_power_per_rate { * For MIMO rates, one value may be different from the other, * in order to balance the Tx output between the two transmitters. * - * See more details in doc for TXPOWER in iwl-4965-hw.h. + * See more details in doc for TXPOWER in 4965.h. */ -union iwl4965_tx_power_dual_stream { +union il4965_tx_power_dual_stream { struct { u8 radio_tx_gain[2]; u8 dsp_predis_atten[2]; @@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream { /** * struct tx_power_dual_stream * - * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH * - * Same format as iwl_tx_power_dual_stream, but __le32 + * Same format as il_tx_power_dual_stream, but __le32 */ struct tx_power_dual_stream { __le32 dw; } __packed; /** - * struct iwl4965_tx_power_db + * struct il4965_tx_power_db * - * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH */ -struct iwl4965_tx_power_db { - struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; +struct il4965_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES]; } __packed; /****************************************************************************** @@ -387,7 +382,7 @@ struct iwl4965_tx_power_db { #define INITIALIZE_SUBTYPE (9) /* - * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) + * ("Initialize") N_ALIVE = 0x1 (response only, not a command) * * uCode issues this "initialize alive" notification once the initialization * uCode image has completed its work, and is ready to load the runtime image. 
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db { * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, * for each of 5 frequency ranges. */ -struct iwl_init_alive_resp { +struct il_init_alive_resp { u8 ucode_minor; u8 ucode_major; __le16 reserved1; @@ -433,9 +428,8 @@ struct iwl_init_alive_resp { * 2 Tx chains */ } __packed; - /** - * REPLY_ALIVE = 0x1 (response only, not a command) + * N_ALIVE = 0x1 (response only, not a command) * * uCode issues this "alive" notification once the runtime image is ready * to receive commands from the driver. This is the *second* "alive" @@ -454,7 +448,7 @@ struct iwl_init_alive_resp { * __le32 log_size; log capacity (in number of entries) * __le32 type; (1) timestamp with each entry, (0) no timestamp * __le32 wraps; # times uCode has wrapped to top of circular buffer - * __le32 write_index; next circular buffer entry that uCode would fill + * __le32 write_idx; next circular buffer entry that uCode would fill * * The header is followed by the circular buffer of log entries. Entries * with timestamps have the following format: @@ -511,13 +505,13 @@ struct iwl_init_alive_resp { * The Linux driver can print both logs to the system log when a uCode error * occurs. */ -struct iwl_alive_resp { +struct il_alive_resp { u8 ucode_minor; u8 ucode_major; __le16 reserved1; u8 sw_rev[8]; u8 ver_type; - u8 ver_subtype; /* not "9" for runtime alive */ + u8 ver_subtype; /* not "9" for runtime alive */ __le16 reserved2; __le32 log_event_table_ptr; /* SRAM address for event log */ __le32 error_event_table_ptr; /* SRAM address for error log */ @@ -526,9 +520,9 @@ struct iwl_alive_resp { } __packed; /* - * REPLY_ERROR = 0x2 (response only, not a command) + * N_ERROR = 0x2 (response only, not a command) */ -struct iwl_error_resp { +struct il_error_resp { __le32 error_type; u8 cmd_id; u8 reserved1; @@ -554,7 +548,6 @@ enum { RXON_DEV_TYPE_SNIFFER = 6, }; - #define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) #define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) #define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) @@ -593,7 +586,6 @@ enum { * (according to ON_AIR deassertion) */ #define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) - /* HT flags */ #define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) #define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) @@ -640,7 +632,7 @@ enum { #define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) /** - * REPLY_RXON = 0x10 (command, has simple generic response) + * C_RXON = 0x10 (command, has simple generic response) * * RXON tunes the radio tuner to a service channel, and sets up a number * of parameters that are used primarily for Rx, but also for Tx operations. @@ -653,11 +645,11 @@ enum { * channel. * * NOTE: All RXONs wipe clean the internal txpower table. Driver must - * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), + * issue a new C_TX_PWR_TBL after each C_RXON (0x10), * regardless of whether RXON_FILTER_ASSOC_MSK is set. */ -struct iwl3945_rxon_cmd { +struct il3945_rxon_cmd { u8 node_addr[6]; __le16 reserved1; u8 bssid_addr[6]; @@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd { __le16 reserved5; } __packed; -struct iwl4965_rxon_cmd { +struct il4965_rxon_cmd { u8 node_addr[6]; __le16 reserved1; u8 bssid_addr[6]; @@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd { /* Create a common rxon cmd which will be typecast into the 3945 or 4965 * specific rxon cmd, depending on where it is called from. 
*/ -struct iwl_legacy_rxon_cmd { +struct il_rxon_cmd { u8 node_addr[6]; __le16 reserved1; u8 bssid_addr[6]; @@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd { u8 reserved5; } __packed; - /* - * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + * C_RXON_ASSOC = 0x11 (command, has simple generic response) */ -struct iwl3945_rxon_assoc_cmd { +struct il3945_rxon_assoc_cmd { __le32 flags; __le32 filter_flags; u8 ofdm_basic_rates; @@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd { __le16 reserved; } __packed; -struct iwl4965_rxon_assoc_cmd { +struct il4965_rxon_assoc_cmd { __le32 flags; __le32 filter_flags; u8 ofdm_basic_rates; @@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd { __le16 reserved; } __packed; -#define IWL_CONN_MAX_LISTEN_INTERVAL 10 -#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ -#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ +#define IL_CONN_MAX_LISTEN_INTERVAL 10 +#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ +#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ /* - * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + * C_RXON_TIMING = 0x14 (command, has simple generic response) */ -struct iwl_rxon_time_cmd { +struct il_rxon_time_cmd { __le64 timestamp; __le16 beacon_interval; - __le16 atim_window; + __le16 atim_win; __le32 beacon_init_val; __le16 listen_interval; u8 dtim_period; @@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd { } __packed; /* - * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response) */ -struct iwl3945_channel_switch_cmd { +struct il3945_channel_switch_cmd { u8 band; u8 expect_beacon; __le16 channel; __le32 rxon_flags; __le32 rxon_filter_flags; __le32 switch_time; - struct iwl3945_power_per_rate power[IWL_MAX_RATES]; + struct il3945_power_per_rate power[IL_MAX_RATES]; } __packed; -struct iwl4965_channel_switch_cmd { +struct il4965_channel_switch_cmd { u8 band; u8 expect_beacon; __le16 channel; __le32 rxon_flags; __le32 rxon_filter_flags; __le32 switch_time; - struct iwl4965_tx_power_db tx_power; + struct il4965_tx_power_db tx_power; } __packed; /* - * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + * N_CHANNEL_SWITCH = 0x73 (notification only, not a command) */ -struct iwl_csa_notification { +struct il_csa_notification { __le16 band; __le16 channel; __le32 status; /* 0 - OK, 1 - fail */ @@ -800,22 +791,22 @@ struct iwl_csa_notification { *****************************************************************************/ /** - * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM - * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd + * struct il_ac_qos -- QOS timing params for C_QOS_PARAM + * One for each of 4 EDCA access categories in struct il_qosparam_cmd * - * @cw_min: Contention window, start value in numbers of slots. + * @cw_min: Contention win, start value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x0f. - * @cw_max: Contention window, max value in numbers of slots. + * @cw_max: Contention win, max value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x3f. * @aifsn: Number of slots in Arbitration Interframe Space (before * performing random backoff timing prior to Tx). Device default 1. * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. 
* - * Device will automatically increase contention window by (2*CW) + 1 for each + * Device will automatically increase contention win by (2*CW) + 1 for each * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW * value, to cap the CW value. */ -struct iwl_ac_qos { +struct il_ac_qos { __le16 cw_min; __le16 cw_max; u8 aifsn; @@ -832,14 +823,14 @@ struct iwl_ac_qos { #define AC_NUM 4 /* - * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + * C_QOS_PARAM = 0x13 (command, has simple generic response) * * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs * 0: Background, 1: Best Effort, 2: Video, 3: Voice. */ -struct iwl_qosparam_cmd { +struct il_qosparam_cmd { __le32 qos_flags; - struct iwl_ac_qos ac[AC_NUM]; + struct il_ac_qos ac[AC_NUM]; } __packed; /****************************************************************************** @@ -852,15 +843,15 @@ struct iwl_qosparam_cmd { */ /* Special, dedicated locations within device's station table */ -#define IWL_AP_ID 0 -#define IWL_STA_ID 2 -#define IWL3945_BROADCAST_ID 24 -#define IWL3945_STATION_COUNT 25 -#define IWL4965_BROADCAST_ID 31 -#define IWL4965_STATION_COUNT 32 +#define IL_AP_ID 0 +#define IL_STA_ID 2 +#define IL3945_BROADCAST_ID 24 +#define IL3945_STATION_COUNT 25 +#define IL4965_BROADCAST_ID 31 +#define IL4965_STATION_COUNT 32 -#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ -#define IWL_INVALID_STATION 255 +#define IL_STATION_COUNT 32 /* MAX(3945,4965) */ +#define IL_INVALID_STATION 255 #define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) #define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) @@ -901,11 +892,11 @@ struct iwl_qosparam_cmd { #define STA_MODIFY_DELBA_TID_MSK 0x10 #define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 -/* Receiver address (actually, Rx station's index into station table), +/* Receiver address (actually, Rx station's idx into station table), * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) -struct iwl4965_keyinfo { +struct il4965_keyinfo { __le16 key_flags; u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ u8 reserved1; @@ -918,12 +909,12 @@ struct iwl4965_keyinfo { /** * struct sta_id_modify * @addr[ETH_ALEN]: station's MAC address - * @sta_id: index of station in uCode's station table + * @sta_id: idx of station in uCode's station table * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change * - * Driver selects unused table index when adding new station, - * or the index to a pre-existing station entry when modifying that station. - * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). + * Driver selects unused table idx when adding new station, + * or the idx to a pre-existing station entry when modifying that station. + * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP). * * modify_mask flags select which parameters to modify vs. leave alone. */ @@ -936,15 +927,15 @@ struct sta_id_modify { } __packed; /* - * REPLY_ADD_STA = 0x18 (command) + * C_ADD_STA = 0x18 (command) * * The device contains an internal table of per-station information, * with info on security keys, aggregation parameters, and Tx rates for * initial Tx attempt and any retries (4965 devices uses - * REPLY_TX_LINK_QUALITY_CMD, - * 3945 uses REPLY_RATE_SCALE to set up rate tables). + * C_TX_LINK_QUALITY_CMD, + * 3945 uses C_RATE_SCALE to set up rate tables). 
* - * REPLY_ADD_STA sets up the table entry for one station, either creating + * C_ADD_STA sets up the table entry for one station, either creating * a new entry, or modifying a pre-existing one. * * NOTE: RXON command (without "associated" bit set) wipes the station table @@ -954,20 +945,20 @@ struct sta_id_modify { * their own txpower/rate setup data). * * When getting started on a new channel, driver must set up the - * IWL_BROADCAST_ID entry (last entry in the table). For a client + * IL_BROADCAST_ID entry (last entry in the table). For a client * station in a BSS, once an AP is selected, driver sets up the AP STA - * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP + * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP * are all that are needed for a BSS client station. If the device is * used as AP, or in an IBSS network, driver must set up station table - * entries for all STAs in network, starting with index IWL_STA_ID. + * entries for all STAs in network, starting with idx IL_STA_ID. */ -struct iwl3945_addsta_cmd { +struct il3945_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ u8 reserved[3]; struct sta_id_modify sta; - struct iwl4965_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ + struct il4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ __le32 station_flags_msk; /* STA_FLG_* */ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) @@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd { __le16 add_immediate_ba_ssn; } __packed; -struct iwl4965_addsta_cmd { +struct il4965_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ u8 reserved[3]; struct sta_id_modify sta; - struct iwl4965_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ + struct il4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ __le32 station_flags_msk; /* STA_FLG_* */ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) @@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd { * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ __le16 tid_disable_tx; - __le16 reserved1; + __le16 reserved1; /* TID for which to add block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ @@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd { } __packed; /* Wrapper struct for 3945 and 4965 addsta_cmd structures */ -struct iwl_legacy_addsta_cmd { +struct il_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ u8 reserved[3]; struct sta_id_modify sta; - struct iwl4965_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ + struct il4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ __le32 station_flags_msk; /* STA_FLG_* */ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) @@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd { * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ __le16 tid_disable_tx; - __le16 rate_n_flags; /* 3945 only */ + __le16 rate_n_flags; /* 3945 only */ /* TID for which to add block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. 
*/ @@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd { __le16 reserved2; } __packed; - #define ADD_STA_SUCCESS_MSK 0x1 -#define ADD_STA_NO_ROOM_IN_TABLE 0x2 +#define ADD_STA_NO_ROOM_IN_TBL 0x2 #define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 #define ADD_STA_MODIFY_NON_EXIST_STA 0x8 /* - * REPLY_ADD_STA = 0x18 (response) + * C_ADD_STA = 0x18 (response) */ -struct iwl_add_sta_resp { - u8 status; /* ADD_STA_* */ +struct il_add_sta_resp { + u8 status; /* ADD_STA_* */ } __packed; #define REM_STA_SUCCESS_MSK 0x1 /* - * REPLY_REM_STA = 0x19 (response) + * C_REM_STA = 0x19 (response) */ -struct iwl_rem_sta_resp { +struct il_rem_sta_resp { u8 status; } __packed; /* - * REPLY_REM_STA = 0x19 (command) + * C_REM_STA = 0x19 (command) */ -struct iwl_rem_sta_cmd { - u8 num_sta; /* number of removed stations */ +struct il_rem_sta_cmd { + u8 num_sta; /* number of removed stations */ u8 reserved[3]; - u8 addr[ETH_ALEN]; /* MAC addr of the first station */ + u8 addr[ETH_ALEN]; /* MAC addr of the first station */ u8 reserved2[2]; } __packed; -#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) -#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) -#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) -#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) -#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) +#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) +#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) +#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) +#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) +#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) -#define IWL_DROP_SINGLE 0 -#define IWL_DROP_SELECTED 1 -#define IWL_DROP_ALL 2 +#define IL_DROP_SINGLE 0 +#define IL_DROP_SELECTED 1 +#define IL_DROP_ALL 2 /* * REPLY_WEP_KEY = 0x20 */ -struct iwl_wep_key { - u8 key_index; +struct il_wep_key { + u8 key_idx; u8 key_offset; u8 reserved1[2]; u8 key_size; @@ -1117,12 +1107,12 @@ struct iwl_wep_key { u8 key[16]; } __packed; -struct iwl_wep_cmd { +struct il_wep_cmd { u8 num_keys; u8 global_key_type; u8 flags; u8 reserved; - struct iwl_wep_key key[0]; + struct il_wep_key key[0]; } __packed; #define WEP_KEY_WEP_TYPE 1 @@ -1168,8 +1158,7 @@ struct iwl_wep_cmd { #define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) #define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) - -struct iwl3945_rx_frame_stats { +struct il3945_rx_frame_stats { u8 phy_count; u8 id; u8 rssi; @@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats { u8 payload[0]; } __packed; -struct iwl3945_rx_frame_hdr { +struct il3945_rx_frame_hdr { __le16 channel; __le16 phy_flags; u8 reserved1; @@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr { u8 payload[0]; } __packed; -struct iwl3945_rx_frame_end { +struct il3945_rx_frame_end { __le32 status; __le64 timestamp; __le32 beacon_timestamp; } __packed; /* - * REPLY_3945_RX = 0x1b (response only, not a command) + * N_3945_RX = 0x1b (response only, not a command) * * NOTE: DO NOT dereference from casts to this structure * It is provided only for calculating minimum data set size. 
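Because struct il_wep_cmd above ends in a zero-length key[] array, the length of a REPLY_WEP_KEY (0x20) payload has to be computed from num_keys; a minimal sketch with a hypothetical helper name, not taken from the driver:

	static inline size_t il_wep_cmd_size(u8 num_keys)
	{
		/* fixed header plus one il_wep_key entry per key */
		return sizeof(struct il_wep_cmd) +
		       num_keys * sizeof(struct il_wep_key);
	}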
* The actual offsets of the hdr and end are dynamic based on * stats.phy_count */ -struct iwl3945_rx_frame { - struct iwl3945_rx_frame_stats stats; - struct iwl3945_rx_frame_hdr hdr; - struct iwl3945_rx_frame_end end; +struct il3945_rx_frame { + struct il3945_rx_frame_stats stats; + struct il3945_rx_frame_hdr hdr; + struct il3945_rx_frame_end end; } __packed; -#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) +#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame)) /* Fixed (non-configurable) rx data from phy */ -#define IWL49_RX_RES_PHY_CNT 14 -#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) -#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) -#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ -#define IWL49_AGC_DB_POS (7) -struct iwl4965_rx_non_cfg_phy { +#define IL49_RX_RES_PHY_CNT 14 +#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IL49_AGC_DB_POS (7) +struct il4965_rx_non_cfg_phy { __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ u8 pad[0]; } __packed; - /* - * REPLY_RX = 0xc3 (response only, not a command) + * N_RX = 0xc3 (response only, not a command) * Used only for legacy (non 11n) frames. */ -struct iwl_rx_phy_res { - u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ +struct il_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ u8 stat_id; /* configurable DSP phy data set ID */ u8 reserved1; __le64 timestamp; /* TSF at on air rise */ - __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ __le16 phy_flags; /* general phy flags: band, modulation, ... */ __le16 channel; /* channel number */ - u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ + u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ __le32 rate_n_flags; /* RATE_MCS_* */ __le16 byte_count; /* frame's byte-count */ __le16 frame_time; /* frame's time on the air */ } __packed; -struct iwl_rx_mpdu_res_start { +struct il_rx_mpdu_res_start { __le16 byte_count; __le16 reserved; } __packed; - /****************************************************************************** * (5) * Tx Commands & Responses: * - * Driver must place each REPLY_TX command into one of the prioritized Tx + * Driver must place each C_TX command into one of the prioritized Tx * queues in host DRAM, shared between driver and device (see comments for * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode * are preparing to transmit, the device pulls the Tx command over the PCI @@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start { * uCode handles all timing and protocol related to control frames * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler * handle reception of block-acks; uCode updates the host driver via - * REPLY_COMPRESSED_BA. + * N_COMPRESSED_BA. * * uCode handles retrying Tx when an ACK is expected but not received. * This includes trying lower data rates than the one requested in the Tx - * command, as set up by the REPLY_RATE_SCALE (for 3945) or - * REPLY_TX_LINK_QUALITY_CMD (4965). + * command, as set up by the C_RATE_SCALE (for 3945) or + * C_TX_LINK_QUALITY_CMD (4965). 
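The IL49_AGC_DB_MASK / IL49_AGC_DB_POS pair above locates the AGC dB value inside il4965_rx_non_cfg_phy.agc_info (bits 7:13); a small sketch of extracting it, not taken from the driver itself:

	static u8 il4965_agc_db(const struct il4965_rx_non_cfg_phy *phy)
	{
		u16 agc = le16_to_cpu(phy->agc_info);

		/* agc code sits in bits 0:6, the dB value in bits 7:13 */
		return (agc & IL49_AGC_DB_MASK) >> IL49_AGC_DB_POS;
	}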
* - * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. + * Driver sets up transmit power for various rates via C_TX_PWR_TBL. * This command must be executed after every RXON command, before Tx can occur. *****************************************************************************/ -/* REPLY_TX Tx flags field */ +/* C_TX Tx flags field */ /* * 1: Use Request-To-Send protocol before this frame. @@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start { #define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) /* For 4965 devices: - * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). - * Tx command's initial_rate_index indicates first rate to try; + * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD). + * Tx command's initial_rate_idx indicates first rate to try; * uCode walks through table for additional Tx attempts. * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. * This rate will be used for all Tx attempts; it will not be scaled. */ @@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start { /* 1: uCode overrides sequence control field in MAC header. * 0: Driver provides sequence control field in MAC header. * Set this for management frames, non-QOS data frames, non-unicast frames, - * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ + * and also in Tx command embedded in C_SCAN for active scans. */ #define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) /* 1: This frame is non-last MPDU; more fragments are coming. @@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start { /* HCCA-AP - disable duration overwriting. */ #define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) - /* * TX command security control */ @@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start { #define TKIP_ICV_LEN 4 /* - * REPLY_TX = 0x1c (command) + * C_TX = 0x1c (command) */ -struct iwl3945_tx_cmd { +struct il3945_tx_cmd { /* * MPDU byte count: * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, @@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd { } __packed; /* - * REPLY_TX = 0x1c (response) + * C_TX = 0x1c (response) */ -struct iwl3945_tx_resp { +struct il3945_tx_resp { u8 failure_rts; u8 failure_frame; u8 bt_kill_count; @@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp { __le32 status; /* TX status */ } __packed; - /* * 4965 uCode updates these Tx attempt count values in host DRAM. * Used for managing Tx retries when expecting block-acks. * Driver should set these fields to 0. */ -struct iwl_dram_scratch { +struct il_dram_scratch { u8 try_cnt; /* Tx attempts */ u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ __le16 reserved; } __packed; -struct iwl_tx_cmd { +struct il_tx_cmd { /* * MPDU byte count: * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, @@ -1481,7 +1466,7 @@ struct iwl_tx_cmd { /* uCode may modify this field of the Tx command (in host DRAM!). * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ - struct iwl_dram_scratch scratch; + struct il_dram_scratch scratch; /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ __le32 rate_n_flags; /* RATE_MCS_* */ @@ -1493,13 +1478,13 @@ struct iwl_tx_cmd { u8 sec_ctl; /* TX_CMD_SEC_* */ /* - * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial + * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for * data frames, this field may be used to selectively reduce initial * rate (via non-0 value) for special frames (e.g. 
management), while * still supporting rate scaling for all frames. */ - u8 initial_rate_index; + u8 initial_rate_idx; u8 reserved; u8 key[16]; __le16 next_frame_flags; @@ -1628,12 +1613,12 @@ enum { }; enum { - TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ TX_STATUS_DELAY_MSK = 0x00000040, TX_STATUS_ABORT_MSK = 0x00000080, TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ - TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ }; @@ -1671,7 +1656,7 @@ enum { #define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 /* - * REPLY_TX = 0x1c (response) + * C_TX = 0x1c (response) * * This response may be in one of two slightly different formats, indicated * by the frame_count field: @@ -1697,7 +1682,7 @@ struct agg_tx_status { __le16 sequence; } __packed; -struct iwl4965_tx_resp { +struct il4965_tx_resp { u8 frame_count; /* 1 no aggregation, >1 aggregation */ u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ u8 failure_rts; /* # failures due to unsuccessful RTS */ @@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp { */ union { __le32 status; - struct agg_tx_status agg_status[0]; /* for each agg frame */ + struct agg_tx_status agg_status[0]; /* for each agg frame */ } u; } __packed; /* - * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + * N_COMPRESSED_BA = 0xc5 (response only, not a command) * * Reports Block-Acknowledge from recipient station */ -struct iwl_compressed_ba_resp { +struct il_compressed_ba_resp { __le32 sta_addr_lo32; __le16 sta_addr_hi16; __le16 reserved; @@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp { } __packed; /* - * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + * C_TX_PWR_TBL = 0x97 (command, has simple generic response) * - * See details under "TXPOWER" in iwl-4965-hw.h. + * See details under "TXPOWER" in 4965.h. 
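struct il4965_tx_resp above ends in a union whose layout depends on frame_count (1 for a plain frame, greater than 1 for an aggregate); a hedged sketch of reading the status of the first attempt, with a hypothetical helper name:

	static u32 il4965_first_tx_status(const struct il4965_tx_resp *resp)
	{
		/* single frame: one 32-bit status word */
		if (resp->frame_count == 1)
			return le32_to_cpu(resp->u.status);

		/* aggregation: one 16-bit agg_tx_status entry per frame */
		return le16_to_cpu(resp->u.agg_status[0].status);
	}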
*/ -struct iwl3945_txpowertable_cmd { +struct il3945_txpowertable_cmd { u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ u8 reserved; __le16 channel; - struct iwl3945_power_per_rate power[IWL_MAX_RATES]; + struct il3945_power_per_rate power[IL_MAX_RATES]; } __packed; -struct iwl4965_txpowertable_cmd { +struct il4965_txpowertable_cmd { u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ u8 reserved; __le16 channel; - struct iwl4965_tx_power_db tx_power; + struct il4965_tx_power_db tx_power; } __packed; - /** - * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response + * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response * - * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) + * C_RATE_SCALE = 0x47 (command, has simple generic response) * * NOTE: The table of rates passed to the uCode via the * RATE_SCALE command sets up the corresponding order of @@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd { * * For example, if you set 9MB (PLCP 0x0f) as the first * rate in the rate table, the bit mask for that rate - * when passed through ofdm_basic_rates on the REPLY_RXON + * when passed through ofdm_basic_rates on the C_RXON * command would be bit 0 (1 << 0) */ -struct iwl3945_rate_scaling_info { +struct il3945_rate_scaling_info { __le16 rate_n_flags; u8 try_cnt; - u8 next_rate_index; + u8 next_rate_idx; } __packed; -struct iwl3945_rate_scaling_cmd { +struct il3945_rate_scaling_cmd { u8 table_id; u8 reserved[3]; - struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; + struct il3945_rate_scaling_info table[IL_MAX_RATES]; } __packed; - /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ #define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) @@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd { #define LINK_QUAL_ANT_B_MSK (1 << 1) #define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) - /** - * struct iwl_link_qual_general_params + * struct il_link_qual_general_params * - * Used in REPLY_TX_LINK_QUALITY_CMD + * Used in C_TX_LINK_QUALITY_CMD */ -struct iwl_link_qual_general_params { +struct il_link_qual_general_params { u8 flags; - /* No entries at or above this (driver chosen) index contain MIMO */ + /* No entries at or above this (driver chosen) idx contain MIMO */ u8 mimo_delimiter; /* Best single antenna to use for single stream (legacy, SISO). */ u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ /* Best antennas to use for MIMO (unused for 4965, assumes both). */ - u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ + u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ /* * If driver needs to use different initial rates for different * EDCA QOS access categories (as implemented by tx fifos 0-3), - * this table will set that up, by indicating the indexes in the + * this table will set that up, by indicating the idxes in the * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. * Otherwise, driver should set all entries to 0. * @@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params { * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 
*/ - u8 start_rate_index[LINK_QUAL_AC_NUM]; + u8 start_rate_idx[LINK_QUAL_AC_NUM]; } __packed; -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ #define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) #define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) @@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params { #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) /** - * struct iwl_link_qual_agg_params + * struct il_link_qual_agg_params * - * Used in REPLY_TX_LINK_QUALITY_CMD + * Used in C_TX_LINK_QUALITY_CMD */ -struct iwl_link_qual_agg_params { +struct il_link_qual_agg_params { /* *Maximum number of uSec in aggregation. @@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params { } __packed; /* - * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) * - * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. + * For 4965 devices only; 3945 uses C_RATE_SCALE. * * Each station in the 4965 device's internal station table has its own table * of 16 @@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params { * one station. * * NOTE: Station must already be in 4965 device's station table. - * Use REPLY_ADD_STA. + * Use C_ADD_STA. * * The rate scaling procedures described below work well. Of course, other * procedures are possible, and may work better for particular environments. * * - * FILLING THE RATE TABLE + * FILLING THE RATE TBL * * Given a particular initial rate and mode, as determined by the rate * scaling algorithm described below, the Linux driver uses the following @@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params { * speculative mode as the new current active mode. * * Each history set contains, separately for each possible rate, data for a - * sliding window of the 62 most recent tx attempts at that rate. The data + * sliding win of the 62 most recent tx attempts at that rate. The data * includes a shifting bitmap of success(1)/failure(0), and sums of successful * and attempted frames, from which the driver can additionally calculate a * success ratio (success / attempted) and number of failures - * (attempted - success), and control the size of the window (attempted). + * (attempted - success), and control the size of the win (attempted). * The driver uses the bit map to remove successes from the success sum, as - * the oldest tx attempts fall out of the window. + * the oldest tx attempts fall out of the win. * * When the 4965 device makes multiple tx attempts for a given frame, each * attempt might be at a different rate, and have different modulation @@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params { * * When using block-ack (aggregation), all frames are transmitted at the same * rate, since there is no per-attempt acknowledgment from the destination - * station. The Tx response struct iwl_tx_resp indicates the Tx rate in + * station. The Tx response struct il_tx_resp indicates the Tx rate in * rate_n_flags field. After receiving a block-ack, the driver can update * history for the entire block all at once. * @@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params { * good performance; higher rate is sure to have poorer success. * * 6) Re-evaluate the rate after each tx frame. 
If working with block- - * acknowledge, history and statistics may be calculated for the entire - * block (including prior history that fits within the history windows), + * acknowledge, history and stats may be calculated for the entire + * block (including prior history that fits within the history wins), * before re-evaluation. * * FINDING BEST STARTING MODULATION MODE: @@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params { * legacy), and then repeat the search process. * */ -struct iwl_link_quality_cmd { +struct il_link_quality_cmd { /* Index of destination/recipient station in uCode's station table */ u8 sta_id; u8 reserved1; __le16 control; /* not used */ - struct iwl_link_qual_general_params general_params; - struct iwl_link_qual_agg_params agg_params; + struct il_link_qual_general_params general_params; + struct il_link_qual_agg_params agg_params; /* - * Rate info; when using rate-scaling, Tx command's initial_rate_index - * specifies 1st Tx rate attempted, via index into this table. + * Rate info; when using rate-scaling, Tx command's initial_rate_idx + * specifies 1st Tx rate attempted, via idx into this table. * 4965 devices works its way through table when retrying Tx. */ struct { - __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ + __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */ } rs_table[LINK_QUAL_MAX_RETRY_NUM]; __le32 reserved2; } __packed; @@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd { #define BT_MAX_KILL_DEF (0x5) /* - * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + * C_BT_CONFIG = 0x9b (command, has simple generic response) * * 3945 and 4965 devices support hardware handshake with Bluetooth device on * same platform. Bluetooth device alerts wireless device when it will Tx; * wireless device can delay or kill its own Tx to accommodate. 
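The rate-scaling notes above describe a per-rate sliding win of the 62 most recent tx attempts: a success bitmap plus attempt and success sums, from which the driver derives a success ratio. A minimal sketch of such a win, with an illustrative layout that is not the driver's own bookkeeping:

	#define RS_WIN_SIZE 62

	struct rs_win {
		u64 bitmap;	/* bit 0 = newest attempt, 1 = success */
		u32 attempts;	/* frames currently in the win */
		u32 successes;	/* successful frames in the win */
	};

	static void rs_win_add(struct rs_win *w, bool success)
	{
		/* retire the oldest attempt once the win is full */
		if (w->attempts == RS_WIN_SIZE) {
			if (w->bitmap & (1ULL << (RS_WIN_SIZE - 1)))
				w->successes--;
			w->attempts--;
		}

		w->bitmap = (w->bitmap << 1) & ((1ULL << RS_WIN_SIZE) - 1);
		if (success) {
			w->bitmap |= 1;
			w->successes++;
		}
		w->attempts++;

		/* success ratio = successes / attempts,
		 * failures = attempts - successes */
	}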
*/ -struct iwl_bt_cmd { +struct il_bt_cmd { u8 flags; u8 lead_time; u8 max_kill; @@ -2132,7 +2114,6 @@ struct iwl_bt_cmd { __le32 kill_cts_mask; } __packed; - /****************************************************************************** * (6) * Spectrum Management (802.11h) Commands, Responses, Notifications: @@ -2150,18 +2131,18 @@ struct iwl_bt_cmd { RXON_FILTER_ASSOC_MSK | \ RXON_FILTER_BCON_AWARE_MSK) -struct iwl_measure_channel { +struct il_measure_channel { __le32 duration; /* measurement duration in extended beacon * format */ u8 channel; /* channel to measure */ - u8 type; /* see enum iwl_measure_type */ + u8 type; /* see enum il_measure_type */ __le16 reserved; } __packed; /* - * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + * C_SPECTRUM_MEASUREMENT = 0x74 (command) */ -struct iwl_spectrum_cmd { +struct il_spectrum_cmd { __le16 len; /* number of bytes starting from token */ u8 token; /* token id */ u8 id; /* measurement id -- 0 or 1 */ @@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd { __le32 filter_flags; /* rxon filter flags */ __le16 channel_count; /* minimum 1, maximum 10 */ __le16 reserved3; - struct iwl_measure_channel channels[10]; + struct il_measure_channel channels[10]; } __packed; /* - * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + * C_SPECTRUM_MEASUREMENT = 0x74 (response) */ -struct iwl_spectrum_resp { +struct il_spectrum_resp { u8 token; u8 id; /* id of the prior command replaced, or 0xff */ __le16 status; /* 0 - command will be handled @@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp { * measurement) */ } __packed; -enum iwl_measurement_state { - IWL_MEASUREMENT_START = 0, - IWL_MEASUREMENT_STOP = 1, +enum il_measurement_state { + IL_MEASUREMENT_START = 0, + IL_MEASUREMENT_STOP = 1, }; -enum iwl_measurement_status { - IWL_MEASUREMENT_OK = 0, - IWL_MEASUREMENT_CONCURRENT = 1, - IWL_MEASUREMENT_CSA_CONFLICT = 2, - IWL_MEASUREMENT_TGH_CONFLICT = 3, +enum il_measurement_status { + IL_MEASUREMENT_OK = 0, + IL_MEASUREMENT_CONCURRENT = 1, + IL_MEASUREMENT_CSA_CONFLICT = 2, + IL_MEASUREMENT_TGH_CONFLICT = 3, /* 4-5 reserved */ - IWL_MEASUREMENT_STOPPED = 6, - IWL_MEASUREMENT_TIMEOUT = 7, - IWL_MEASUREMENT_PERIODIC_FAILED = 8, + IL_MEASUREMENT_STOPPED = 6, + IL_MEASUREMENT_TIMEOUT = 7, + IL_MEASUREMENT_PERIODIC_FAILED = 8, }; #define NUM_ELEMENTS_IN_HISTOGRAM 8 -struct iwl_measurement_histogram { +struct il_measurement_histogram { __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ } __packed; /* clear channel availability counters */ -struct iwl_measurement_cca_counters { +struct il_measurement_cca_counters { __le32 ofdm; __le32 cck; } __packed; -enum iwl_measure_type { - IWL_MEASURE_BASIC = (1 << 0), - IWL_MEASURE_CHANNEL_LOAD = (1 << 1), - IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), - IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), - IWL_MEASURE_FRAME = (1 << 4), +enum il_measure_type { + IL_MEASURE_BASIC = (1 << 0), + IL_MEASURE_CHANNEL_LOAD = (1 << 1), + IL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IL_MEASURE_FRAME = (1 << 4), /* bits 5:6 are reserved */ - IWL_MEASURE_IDLE = (1 << 7), + IL_MEASURE_IDLE = (1 << 7), }; /* - * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) + * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command) */ -struct iwl_spectrum_notification { +struct il_spectrum_notification { u8 id; /* measurement id -- 0 or 1 */ u8 token; - u8 channel_index; /* index in measurement channel list */ + u8 channel_idx; /* idx 
in measurement channel list */ u8 state; /* 0 - start, 1 - stop */ __le32 start_time; /* lower 32-bits of TSF */ u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ u8 channel; - u8 type; /* see enum iwl_measurement_type */ + u8 type; /* see enum il_measurement_type */ u8 reserved1; /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only * valid if applicable for measurement type requested. */ @@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification { u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - * unidentified */ u8 reserved2[3]; - struct iwl_measurement_histogram histogram; + struct il_measurement_histogram histogram; __le32 stop_time; /* lower 32-bits of TSF */ - __le32 status; /* see iwl_measurement_status */ + __le32 status; /* see il_measurement_status */ } __packed; /****************************************************************************** @@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification { *****************************************************************************/ /** - * struct iwl_powertable_cmd - Power Table Command + * struct il_powertable_cmd - Power Table Command * @flags: See below: * - * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * C_POWER_TBL = 0x77 (command, has simple generic response) * * PM allow: * bit 0 - '0' Driver not allow power management @@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification { * '10' force xtal sleep * '11' Illegal set * - * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then + * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then * ucode assume sleep over DTIM is allowed and we don't need to wake up * for every DTIM. */ -#define IWL_POWER_VEC_SIZE 5 +#define IL_POWER_VEC_SIZE 5 -#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) -#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) +#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) +#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) -struct iwl3945_powertable_cmd { +struct il3945_powertable_cmd { __le16 flags; u8 reserved[2]; __le32 rx_data_timeout; __le32 tx_data_timeout; - __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 sleep_interval[IL_POWER_VEC_SIZE]; } __packed; -struct iwl_powertable_cmd { +struct il_powertable_cmd { __le16 flags; - u8 keep_alive_seconds; /* 3945 reserved */ - u8 debug_flags; /* 3945 reserved */ + u8 keep_alive_seconds; /* 3945 reserved */ + u8 debug_flags; /* 3945 reserved */ __le32 rx_data_timeout; __le32 tx_data_timeout; - __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 sleep_interval[IL_POWER_VEC_SIZE]; __le32 keep_alive_beacons; } __packed; /* - * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * N_PM_SLEEP = 0x7A (notification only, not a command) * all devices identical. */ -struct iwl_sleep_notification { +struct il_sleep_notification { u8 pm_sleep_mode; u8 pm_wakeup_src; __le16 reserved; @@ -2332,23 +2313,23 @@ struct iwl_sleep_notification { /* Sleep states. all devices identical. 
*/ enum { - IWL_PM_NO_SLEEP = 0, - IWL_PM_SLP_MAC = 1, - IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, - IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, - IWL_PM_SLP_PHY = 4, - IWL_PM_SLP_REPENT = 5, - IWL_PM_WAKEUP_BY_TIMER = 6, - IWL_PM_WAKEUP_BY_DRIVER = 7, - IWL_PM_WAKEUP_BY_RFKILL = 8, + IL_PM_NO_SLEEP = 0, + IL_PM_SLP_MAC = 1, + IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IL_PM_SLP_PHY = 4, + IL_PM_SLP_REPENT = 5, + IL_PM_WAKEUP_BY_TIMER = 6, + IL_PM_WAKEUP_BY_DRIVER = 7, + IL_PM_WAKEUP_BY_RFKILL = 8, /* 3 reserved */ - IWL_PM_NUM_OF_MODES = 12, + IL_PM_NUM_OF_MODES = 12, }; /* - * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + * N_CARD_STATE = 0xa1 (notification only, not a command) */ -struct iwl_card_state_notif { +struct il_card_state_notif { __le32 flags; } __packed; @@ -2357,11 +2338,11 @@ struct iwl_card_state_notif { #define CT_CARD_DISABLED 0x04 #define RXON_CARD_DISABLED 0x10 -struct iwl_ct_kill_config { - __le32 reserved; - __le32 critical_temperature_M; - __le32 critical_temperature_R; -} __packed; +struct il_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __packed; /****************************************************************************** * (8) @@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config { #define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) /** - * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table + * struct il_scan_channel - entry in C_SCAN channel table * * One for each channel in the scan list. * Each channel can independently select: @@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config { * quiet_plcp_th, good_CRC_th) * * To avoid uCode errors, make sure the following are true (see comments - * under struct iwl_scan_cmd about max_out_time and quiet_time): + * under struct il_scan_cmd about max_out_time and quiet_time): * 1) If using passive_dwell (i.e. 
passive_dwell != 0): * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) * 2) quiet_time <= active_dwell @@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config { * passive_dwell < max_out_time * active_dwell < max_out_time */ -struct iwl3945_scan_channel { +struct il3945_scan_channel { /* * type is defined as: * 0:0 1 = active, 0 = passive @@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel { * 5:7 reserved */ u8 type; - u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ - struct iwl3945_tx_power tpc; + u8 channel; /* band is selected by il3945_scan_cmd "flags" field */ + struct il3945_tx_power tpc; __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ } __packed; /* set number of direct probes u8 type */ -#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) +#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) -struct iwl_scan_channel { +struct il_scan_channel { /* * type is defined as: * 0:0 1 = active, 0 = passive @@ -2418,7 +2399,7 @@ struct iwl_scan_channel { * 21:31 reserved */ __le32 type; - __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ + __le16 channel; /* band is selected by il_scan_cmd "flags" field */ u8 tx_gain; /* gain for analog radio */ u8 dsp_atten; /* gain for DSP */ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ @@ -2426,17 +2407,17 @@ struct iwl_scan_channel { } __packed; /* set number of direct probes __le32 type */ -#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) +#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) /** - * struct iwl_ssid_ie - directed scan network information element + * struct il_ssid_ie - directed scan network information element * - * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in - * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; + * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in + * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel; * each channel may select different ssids from among the 20 (4) entries. * SSID IEs get transmitted in reverse order of entry. */ -struct iwl_ssid_ie { +struct il_ssid_ie { u8 id; u8 len; u8 ssid[32]; @@ -2445,14 +2426,14 @@ struct iwl_ssid_ie { #define PROBE_OPTION_MAX_3945 4 #define PROBE_OPTION_MAX 20 #define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) -#define IWL_GOOD_CRC_TH_DISABLED 0 -#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) -#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) -#define IWL_MAX_SCAN_SIZE 1024 -#define IWL_MAX_CMD_SIZE 4096 +#define IL_GOOD_CRC_TH_DISABLED 0 +#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) +#define IL_MAX_SCAN_SIZE 1024 +#define IL_MAX_CMD_SIZE 4096 /* - * REPLY_SCAN_CMD = 0x80 (command) + * C_SCAN = 0x80 (command) * * The hardware scan command is very powerful; the driver can set it up to * maintain (relatively) normal network traffic while doing a scan in the @@ -2501,10 +2482,10 @@ struct iwl_ssid_ie { * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. * * To avoid uCode errors, see timing restrictions described under - * struct iwl_scan_channel. + * struct il_scan_channel. */ -struct iwl3945_scan_cmd { +struct il3945_scan_cmd { __le16 len; u8 reserved0; u8 channel_count; /* # channels in channel list */ @@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd { /* For active scans (set to all-0s for passive scans). 
* Does not include payload. Must specify Tx rate; no rate scaling. */ - struct iwl3945_tx_cmd tx_cmd; + struct il3945_tx_cmd tx_cmd; /* For directed active scans (set to all-0s otherwise) */ - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; + struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; /* * Probe request frame, followed by channel list. @@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd { * Number of channels in list is specified by channel_count. * Each channel in list is of type: * - * struct iwl3945_scan_channel channels[0]; + * struct il3945_scan_channel channels[0]; * * NOTE: Only one band of channels can be scanned per pass. You * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait - * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * for one scan to complete (i.e. receive N_SCAN_COMPLETE) * before requesting another scan. */ u8 data[0]; } __packed; -struct iwl_scan_cmd { +struct il_scan_cmd { __le16 len; u8 reserved0; u8 channel_count; /* # channels in channel list */ @@ -2569,10 +2550,10 @@ struct iwl_scan_cmd { /* For active scans (set to all-0s for passive scans). * Does not include payload. Must specify Tx rate; no rate scaling. */ - struct iwl_tx_cmd tx_cmd; + struct il_tx_cmd tx_cmd; /* For directed active scans (set to all-0s otherwise) */ - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + struct il_ssid_ie direct_scan[PROBE_OPTION_MAX]; /* * Probe request frame, followed by channel list. @@ -2582,11 +2563,11 @@ struct iwl_scan_cmd { * Number of channels in list is specified by channel_count. * Each channel in list is of type: * - * struct iwl_scan_channel channels[0]; + * struct il_scan_channel channels[0]; * * NOTE: Only one band of channels can be scanned per pass. You * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait - * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * for one scan to complete (i.e. receive N_SCAN_COMPLETE) * before requesting another scan. 
*/ u8 data[0]; @@ -2598,16 +2579,16 @@ struct iwl_scan_cmd { #define ABORT_STATUS 0x2 /* - * REPLY_SCAN_CMD = 0x80 (response) + * C_SCAN = 0x80 (response) */ -struct iwl_scanreq_notification { +struct il_scanreq_notification { __le32 status; /* 1: okay, 2: cannot fulfill request */ } __packed; /* - * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + * N_SCAN_START = 0x82 (notification only, not a command) */ -struct iwl_scanstart_notification { +struct il_scanstart_notification { __le32 tsf_low; __le32 tsf_high; __le32 beacon_timer; @@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification { #define SCAN_OWNER_STATUS 0x1 #define MEASURE_OWNER_STATUS 0x2 -#define IWL_PROBE_STATUS_OK 0 -#define IWL_PROBE_STATUS_TX_FAILED BIT(0) +#define IL_PROBE_STATUS_OK 0 +#define IL_PROBE_STATUS_TX_FAILED BIT(0) /* error statuses combined with TX_FAILED */ -#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) -#define IWL_PROBE_STATUS_FAIL_BT BIT(2) +#define IL_PROBE_STATUS_FAIL_TTL BIT(1) +#define IL_PROBE_STATUS_FAIL_BT BIT(2) -#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */ /* - * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + * N_SCAN_RESULTS = 0x83 (notification only, not a command) */ -struct iwl_scanresults_notification { +struct il_scanresults_notification { u8 channel; u8 band; u8 probe_status; - u8 num_probe_not_sent; /* not enough time to send */ + u8 num_probe_not_sent; /* not enough time to send */ __le32 tsf_low; __le32 tsf_high; - __le32 statistics[NUMBER_OF_STATISTICS]; + __le32 stats[NUMBER_OF_STATS]; } __packed; /* - * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + * N_SCAN_COMPLETE = 0x84 (notification only, not a command) */ -struct iwl_scancomplete_notification { +struct il_scancomplete_notification { u8 scanned_channels; u8 status; u8 last_channel; @@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification { __le32 tsf_high; } __packed; - /****************************************************************************** * (9) * IBSS/AP Commands and Notifications: * *****************************************************************************/ -enum iwl_ibss_manager { - IWL_NOT_IBSS_MANAGER = 0, - IWL_IBSS_MANAGER = 1, +enum il_ibss_manager { + IL_NOT_IBSS_MANAGER = 0, + IL_IBSS_MANAGER = 1, }; /* - * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + * N_BEACON = 0x90 (notification only, not a command) */ -struct iwl3945_beacon_notif { - struct iwl3945_tx_resp beacon_notify_hdr; +struct il3945_beacon_notif { + struct il3945_tx_resp beacon_notify_hdr; __le32 low_tsf; __le32 high_tsf; __le32 ibss_mgr_status; } __packed; -struct iwl4965_beacon_notif { - struct iwl4965_tx_resp beacon_notify_hdr; +struct il4965_beacon_notif { + struct il4965_tx_resp beacon_notify_hdr; __le32 low_tsf; __le32 high_tsf; __le32 ibss_mgr_status; } __packed; /* - * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + * C_TX_BEACON= 0x91 (command, has simple generic response) */ -struct iwl3945_tx_beacon_cmd { - struct iwl3945_tx_cmd tx; +struct il3945_tx_beacon_cmd { + struct il3945_tx_cmd tx; __le16 tim_idx; u8 tim_size; u8 reserved1; struct ieee80211_hdr frame[0]; /* beacon frame */ } __packed; -struct iwl_tx_beacon_cmd { - struct iwl_tx_cmd tx; +struct il_tx_beacon_cmd { + struct il_tx_cmd tx; __le16 tim_idx; u8 tim_size; u8 reserved1; @@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd { * *****************************************************************************/ 
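The C_SCAN documentation earlier lists dwell-time constraints that must hold to avoid uCode errors: quiet_time <= active_dwell, active_dwell <= passive_dwell when passive_dwell is used, and both dwells below max_out_time when off-channel time is limited. A hedged sketch of checking them before filling a scan channel entry (the helper is hypothetical):

	static bool il_scan_dwell_ok(u16 active_dwell, u16 passive_dwell,
				     u16 quiet_time, u32 max_out_time)
	{
		if (quiet_time > active_dwell)
			return false;
		if (passive_dwell && active_dwell > passive_dwell)
			return false;
		if (max_out_time &&
		    (active_dwell >= max_out_time || passive_dwell >= max_out_time))
			return false;
		return true;
	}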
-#define IWL_TEMP_CONVERT 260 +#define IL_TEMP_CONVERT 260 #define SUP_RATE_11A_MAX_NUM_CHANNELS 8 #define SUP_RATE_11B_MAX_NUM_CHANNELS 4 @@ -2727,9 +2707,9 @@ struct rate_histogram { } failed; } __packed; -/* statistics command response */ +/* stats command response */ -struct iwl39_statistics_rx_phy { +struct iwl39_stats_rx_phy { __le32 ina_cnt; __le32 fina_cnt; __le32 plcp_err; @@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy { __le32 sent_cts_cnt; } __packed; -struct iwl39_statistics_rx_non_phy { +struct iwl39_stats_rx_non_phy { __le32 bogus_cts; /* CTS received when not expecting CTS */ __le32 bogus_ack; /* ACK received when not expecting ACK */ __le32 non_bssid_frames; /* number of frames with BSSID that @@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy { * our serving channel */ } __packed; -struct iwl39_statistics_rx { - struct iwl39_statistics_rx_phy ofdm; - struct iwl39_statistics_rx_phy cck; - struct iwl39_statistics_rx_non_phy general; +struct iwl39_stats_rx { + struct iwl39_stats_rx_phy ofdm; + struct iwl39_stats_rx_phy cck; + struct iwl39_stats_rx_non_phy general; } __packed; -struct iwl39_statistics_tx { +struct iwl39_stats_tx { __le32 preamble_cnt; __le32 rx_detected_cnt; __le32 bt_prio_defer_cnt; @@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx { __le32 actual_ack_cnt; } __packed; -struct statistics_dbg { +struct stats_dbg { __le32 burst_check; __le32 burst_count; __le32 wait_for_silence_timeout_cnt; __le32 reserved[3]; } __packed; -struct iwl39_statistics_div { +struct iwl39_stats_div { __le32 tx_on_a; __le32 tx_on_b; __le32 exec_time; __le32 probe_time; } __packed; -struct iwl39_statistics_general { +struct iwl39_stats_general { __le32 temperature; - struct statistics_dbg dbg; + struct stats_dbg dbg; __le32 sleep_time; __le32 slots_out; __le32 slots_idle; __le32 ttl_timestamp; - struct iwl39_statistics_div div; + struct iwl39_stats_div div; } __packed; -struct statistics_rx_phy { +struct stats_rx_phy { __le32 ina_cnt; __le32 fina_cnt; __le32 plcp_err; @@ -2823,7 +2803,7 @@ struct statistics_rx_phy { __le32 reserved3; } __packed; -struct statistics_rx_ht_phy { +struct stats_rx_ht_phy { __le32 plcp_err; __le32 overrun_err; __le32 early_overrun_err; @@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy { #define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) -struct statistics_rx_non_phy { +struct stats_rx_non_phy { __le32 bogus_cts; /* CTS received when not expecting CTS */ __le32 bogus_ack; /* ACK received when not expecting ACK */ __le32 non_bssid_frames; /* number of frames with BSSID that @@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy { __le32 num_missed_bcon; /* number of missed beacons */ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the * ADC was in saturation */ - __le32 ina_detection_search_time;/* total time (in 0.8us) searched - * for INA */ + __le32 ina_detection_search_time; /* total time (in 0.8us) searched + * for INA */ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ __le32 interference_data_flag; /* flag for interference data * availability. 1 when data is * available. 
*/ - __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 channel_load; /* counts RX Enable time in uSec */ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM * and CCK) counter */ __le32 beacon_rssi_a; @@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy { __le32 beacon_energy_c; } __packed; -struct statistics_rx { - struct statistics_rx_phy ofdm; - struct statistics_rx_phy cck; - struct statistics_rx_non_phy general; - struct statistics_rx_ht_phy ofdm_ht; +struct stats_rx { + struct stats_rx_phy ofdm; + struct stats_rx_phy cck; + struct stats_rx_non_phy general; + struct stats_rx_ht_phy ofdm_ht; } __packed; /** - * struct statistics_tx_power - current tx power + * struct stats_tx_power - current tx power * * @ant_a: current tx power on chain a in 1/2 dB step * @ant_b: current tx power on chain b in 1/2 dB step * @ant_c: current tx power on chain c in 1/2 dB step */ -struct statistics_tx_power { +struct stats_tx_power { u8 ant_a; u8 ant_b; u8 ant_c; u8 reserved; } __packed; -struct statistics_tx_non_phy_agg { +struct stats_tx_non_phy_agg { __le32 ba_timeout; __le32 ba_reschedule_frames; __le32 scd_query_agg_frame_cnt; @@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg { __le32 rx_ba_rsp_cnt; } __packed; -struct statistics_tx { +struct stats_tx { __le32 preamble_cnt; __le32 rx_detected_cnt; __le32 bt_prio_defer_cnt; @@ -2920,13 +2900,12 @@ struct statistics_tx { __le32 burst_abort_missing_next_frame_cnt; __le32 cts_timeout_collision; __le32 ack_or_ba_timeout_collision; - struct statistics_tx_non_phy_agg agg; + struct stats_tx_non_phy_agg agg; __le32 reserved1; } __packed; - -struct statistics_div { +struct stats_div { __le32 tx_on_a; __le32 tx_on_b; __le32 exec_time; @@ -2935,14 +2914,14 @@ struct statistics_div { __le32 reserved2; } __packed; -struct statistics_general_common { - __le32 temperature; /* radio temperature */ - struct statistics_dbg dbg; +struct stats_general_common { + __le32 temperature; /* radio temperature */ + struct stats_dbg dbg; __le32 sleep_time; __le32 slots_out; __le32 slots_idle; __le32 ttl_timestamp; - struct statistics_div div; + struct stats_div div; __le32 rx_enable_counter; /* * num_of_sos_states: @@ -2952,73 +2931,73 @@ struct statistics_general_common { __le32 num_of_sos_states; } __packed; -struct statistics_general { - struct statistics_general_common common; +struct stats_general { + struct stats_general_common common; __le32 reserved2; __le32 reserved3; } __packed; -#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) -#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) -#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) +#define UCODE_STATS_CLEAR_MSK (0x1 << 0) +#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1) +#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2) /* - * REPLY_STATISTICS_CMD = 0x9c, + * C_STATS = 0x9c, * all devices identical. * - * This command triggers an immediate response containing uCode statistics. - * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * This command triggers an immediate response containing uCode stats. + * The response is in the same format as N_STATS 0x9d, below. * * If the CLEAR_STATS configuration flag is set, uCode will clear its - * internal copy of the statistics (counters) after issuing the response. - * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * internal copy of the stats (counters) after issuing the response. + * This flag does not affect N_STATSs after beacons (see below). 
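Since the flags described here are the entire payload of struct il_stats_cmd (renamed just below), requesting an immediate dump that also clears the uCode's counters reduces to filling a single field; how the command is queued is driver-specific and not shown:

	struct il_stats_cmd cmd = {
		.configuration_flags = IL_STATS_CONF_CLEAR_STATS,
	};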
* * If the DISABLE_NOTIF configuration flag is set, uCode will not issue - * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag - * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. + * N_STATSs after received beacons (see below). This flag + * does not affect the response to the C_STATS 0x9c itself. */ -#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ -#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ -struct iwl_statistics_cmd { - __le32 configuration_flags; /* IWL_STATS_CONF_* */ +#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ +#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */ +struct il_stats_cmd { + __le32 configuration_flags; /* IL_STATS_CONF_* */ } __packed; /* - * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * N_STATS = 0x9d (notification only, not a command) * * By default, uCode issues this notification after receiving a beacon * while associated. To disable this behavior, set DISABLE_NOTIF flag in the - * REPLY_STATISTICS_CMD 0x9c, above. + * C_STATS 0x9c, above. * * Statistics counters continue to increment beacon after beacon, but are - * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * cleared when changing channels or when driver issues C_STATS * 0x9c with CLEAR_STATS bit set (see above). * - * uCode also issues this notification during scans. uCode clears statistics - * appropriately so that each notification contains statistics for only the + * uCode also issues this notification during scans. uCode clears stats + * appropriately so that each notification contains stats for only the * one channel that has just been scanned. */ -#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) -#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) +#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) +#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) -struct iwl3945_notif_statistics { +struct il3945_notif_stats { __le32 flag; - struct iwl39_statistics_rx rx; - struct iwl39_statistics_tx tx; - struct iwl39_statistics_general general; + struct iwl39_stats_rx rx; + struct iwl39_stats_tx tx; + struct iwl39_stats_general general; } __packed; -struct iwl_notif_statistics { +struct il_notif_stats { __le32 flag; - struct statistics_rx rx; - struct statistics_tx tx; - struct statistics_general general; + struct stats_rx rx; + struct stats_tx tx; + struct stats_general general; } __packed; /* - * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) + * N_MISSED_BEACONS = 0xa2 (notification only, not a command) * - * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed + * uCode send N_MISSED_BEACONS to driver when detect beacon missed * in regardless of how many missed beacons, which mean when driver receive the * notification, inside the command, it can find all the beacons information * which include number of total missed beacons, number of consecutive missed @@ -3035,18 +3014,17 @@ struct iwl_notif_statistics { * */ -#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) -#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) -#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF +#define IL_MISSED_BEACON_THRESHOLD_MIN (1) +#define IL_MISSED_BEACON_THRESHOLD_DEF (5) +#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF -struct iwl_missed_beacon_notif { +struct il_missed_beacon_notif { __le32 consecutive_missed_beacons; __le32 total_missed_becons; __le32 
num_expected_beacons; __le32 num_recvd_beacons; } __packed; - /****************************************************************************** * (11) * Rx Calibration Commands: @@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif { *****************************************************************************/ /** - * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) + * C_SENSITIVITY = 0xa8 (command, has simple generic response) * * This command sets up the Rx signal detector for a sensitivity level that * is high enough to lock onto all signals within the associated network, @@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif { * time listening, not transmitting). Driver must adjust sensitivity so that * the ratio of actual false alarms to actual Rx time falls within this range. * - * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each + * While associated, uCode delivers N_STATSs after each * received beacon. These provide information to the driver to analyze the - * sensitivity. Don't analyze statistics that come in from scanning, or any - * other non-associated-network source. Pertinent statistics include: + * sensitivity. Don't analyze stats that come in from scanning, or any + * other non-associated-network source. Pertinent stats include: * - * From "general" statistics (struct statistics_rx_non_phy): + * From "general" stats (struct stats_rx_non_phy): * * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) * Measure of energy of desired signal. Used for establishing a level @@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif { * uSecs of actual Rx time during beacon period (varies according to * how much time was spent transmitting). * - * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: + * From "cck" and "ofdm" stats (struct stats_rx_phy), separately: * * false_alarm_cnt * Signal locks abandoned early (before phy-level header). @@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif { * * Total number of false alarms = false_alarms + plcp_errs * - * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd + * For OFDM, adjust the following table entries in struct il_sensitivity_cmd * (notice that the start points for OFDM are at or close to settings for * maximum sensitivity): * * START / MIN / MAX - * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 - * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 - * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 - * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270 * * If actual rate of OFDM false alarms (+ plcp_errors) is too high * (greater than 50 for each 204.8 msecs listening), reduce sensitivity @@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif { * Reset this to 0 at the first beacon period that falls within the * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 
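The sensitivity notes above count false alarms per 204.8 ms of actual listening time, with 5..50 as the healthy band, and channel_load reports Rx-enable time in uSec. A hedged sketch of normalizing the counters before comparing (names are hypothetical):

	/* false alarms + plcp errors, scaled to a 204.8 ms listening period */
	static u32 il_norm_false_alarms(u32 false_alarms, u32 plcp_err,
					u32 rx_enable_time_us)
	{
		u64 fa = (u64)(false_alarms + plcp_err) * 204800;

		if (!rx_enable_time_us)
			return 0;
		return (u32)div_u64(fa, rx_enable_time_us);
	}

	/* > 50: reduce sensitivity; < 5: increase sensitivity; else leave alone */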
* - * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd + * Then, adjust the following CCK table entries in struct il_sensitivity_cmd * (notice that the start points for CCK are at maximum sensitivity): * * START / MIN / MAX - * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 - * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 - * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400 + * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100 * * If actual rate of CCK false alarms (+ plcp_errors) is too high * (greater than 50 for each 204.8 msecs listening), method for reducing * sensitivity is: * - * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX, * up to max 400. * - * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, + * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160, * sensitivity has been reduced a significant amount; bring it up to * a moderate 161. Otherwise, *add* 3, up to max 200. * - * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, + * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160, * sensitivity has been reduced only a moderate or small amount; - * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, + * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX, * down to min 0. Otherwise (if gain has been significantly reduced), - * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. + * don't change the HD_MIN_ENERGY_CCK_DET_IDX value. * * b) Save a snapshot of the "silence reference". * @@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif { * * Method for increasing sensitivity: * - * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, + * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX, * down to min 125. * - * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX, * down to min 200. * - * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. + * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100. * * If actual rate of CCK false alarms (+ plcp_errors) is within good range * (between 5 and 50 for each 204.8 msecs listening): @@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif { * * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), * give some extra margin to energy threshold by *subtracting* 8 - * from value in HD_MIN_ENERGY_CCK_DET_INDEX. + * from value in HD_MIN_ENERGY_CCK_DET_IDX. * * For all cases (too few, too many, good range), make sure that the CCK * detection threshold (energy) is below the energy level for robust * detection over the past 10 beacon periods, the "Max cck energy". * Lower values mean higher energy; this means making sure that the value - * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". + * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy". 
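A hedged sketch of the first two CCK "too many false alarms" steps spelled out above, operating on a host-order copy of the sensitivity values indexed by the HD_*_IDX names defined just below (the helper and table representation are illustrative; step 3 and the energy-threshold bookkeeping are omitted):

	static void il_cck_desense_step(u16 *tbl)
	{
		u16 mrc = tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX];
		u16 x4  = tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX];

		/* 1) add 3 to the MRC correlator threshold, capped at 400 */
		tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = min_t(u16, mrc + 3, 400);

		/* 2) if already well below 160, jump to a moderate 161;
		 *    otherwise add 3, capped at 200 */
		if (x4 < 160)
			x4 = 161;
		else
			x4 = min_t(u16, x4 + 3, 200);
		tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = x4;
	}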
* */ /* - * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) - */ -#define HD_TABLE_SIZE (11) /* number of entries */ -#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ -#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) -#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) -#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) -#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) -#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) -#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) -#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) -#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) -#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) -#define HD_OFDM_ENERGY_TH_IN_INDEX (10) - -/* Control field in struct iwl_sensitivity_cmd */ -#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) -#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) + * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd) + */ +#define HD_TBL_SIZE (11) /* number of entries */ +#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */ +#define HD_MIN_ENERGY_OFDM_DET_IDX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9) +#define HD_OFDM_ENERGY_TH_IN_IDX (10) + +/* Control field in struct il_sensitivity_cmd */ +#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0) +#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1) /** - * struct iwl_sensitivity_cmd + * struct il_sensitivity_cmd * @control: (1) updates working table, (0) updates default table - * @table: energy threshold values, use HD_* as index into table + * @table: energy threshold values, use HD_* as idx into table * * Always use "1" in "control" to update uCode's working table and DSP. */ -struct iwl_sensitivity_cmd { - __le16 control; /* always use "1" */ - __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ +struct il_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */ } __packed; - /** - * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) + * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response) * * This command sets the relative gains of 4965 device's 3 radio receiver chains. * * After the first association, driver should accumulate signal and noise - * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 - * beacons from the associated network (don't collect statistics that come + * stats from the N_STATSs that follow the first 20 + * beacons from the associated network (don't collect stats that come * in from scanning, or any other non-network source). * * DISCONNECTED ANTENNA: @@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd { * Driver should determine which antennas are actually connected, by comparing * average beacon signal levels for the 3 Rx chains. Accumulate (add) the * following values over 20 beacons, one accumulator for each of the chains - * a/b/c, from struct statistics_rx_non_phy: + * a/b/c, from struct stats_rx_non_phy: * * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) * @@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd { * to antennas, see above) for gain, by comparing the average signal levels * detected during the silence after each beacon (background noise). 
* Accumulate (add) the following values over 20 beacons, one accumulator for - * each of the chains a/b/c, from struct statistics_rx_non_phy: + * each of the chains a/b/c, from struct stats_rx_non_phy: * * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) * @@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd { * (accum_noise[i] - accum_noise[reference]) / 30 * * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. - * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the + * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the * driver should limit the difference results to a range of 0-3 (0-4.5 dB), * and set bit 2 to indicate "reduce gain". The value for the reference * (weakest) chain should be "0". @@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd { /* Phy calibration command for series */ /* The default calibrate table size if not specified by firmware */ -#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 +#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 enum { - IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, - IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, + IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, + IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, }; -#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) +#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253) -struct iwl_calib_hdr { +struct il_calib_hdr { u8 op_code; u8 first_group; u8 groups_num; u8 data_valid; } __packed; -/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ -struct iwl_calib_diff_gain_cmd { - struct iwl_calib_hdr hdr; +/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ +struct il_calib_diff_gain_cmd { + struct il_calib_hdr hdr; s8 diff_gain_a; /* see above */ s8 diff_gain_b; s8 diff_gain_c; @@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd { /* * LEDs Command & Response - * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * C_LEDS = 0x48 (command, has simple generic response) * * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), * this command turns it on or off, or sets up a periodic blinking cycle. */ -struct iwl_led_cmd { +struct il_led_cmd { __le32 interval; /* "interval" in uSec */ u8 id; /* 1: Activity, 2: Link, 3: Tech */ u8 off; /* # intervals off while blinking; @@ -3353,14 +3330,15 @@ struct iwl_led_cmd { u8 reserved; } __packed; - /****************************************************************************** * (13) * Union of all expected notifications/responses: * *****************************************************************************/ -struct iwl_rx_packet { +#define IL_RX_FRAME_SIZE_MSK 0x00003fff + +struct il_rx_pkt { /* * The first 4 bytes of the RX frame header contain both the RX frame * size and some flags. 
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet { * 13-00: RX frame size */ __le32 len_n_flags; - struct iwl_cmd_header hdr; + struct il_cmd_header hdr; union { - struct iwl3945_rx_frame rx_frame; - struct iwl3945_tx_resp tx_resp; - struct iwl3945_beacon_notif beacon_status; - - struct iwl_alive_resp alive_frame; - struct iwl_spectrum_notification spectrum_notif; - struct iwl_csa_notification csa_notif; - struct iwl_error_resp err_resp; - struct iwl_card_state_notif card_state_notif; - struct iwl_add_sta_resp add_sta; - struct iwl_rem_sta_resp rem_sta; - struct iwl_sleep_notification sleep_notif; - struct iwl_spectrum_resp spectrum; - struct iwl_notif_statistics stats; - struct iwl_compressed_ba_resp compressed_ba; - struct iwl_missed_beacon_notif missed_beacon; + struct il3945_rx_frame rx_frame; + struct il3945_tx_resp tx_resp; + struct il3945_beacon_notif beacon_status; + + struct il_alive_resp alive_frame; + struct il_spectrum_notification spectrum_notif; + struct il_csa_notification csa_notif; + struct il_error_resp err_resp; + struct il_card_state_notif card_state_notif; + struct il_add_sta_resp add_sta; + struct il_rem_sta_resp rem_sta; + struct il_sleep_notification sleep_notif; + struct il_spectrum_resp spectrum; + struct il_notif_stats stats; + struct il_compressed_ba_resp compressed_ba; + struct il_missed_beacon_notif missed_beacon; __le32 status; u8 raw[0]; } u; } __packed; -#endif /* __iwl_legacy_commands_h__ */ +#endif /* __il_commands_h__ */ diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c new file mode 100644 index 00000000000..36454d0bbee --- /dev/null +++ b/drivers/net/wireless/iwlegacy/common.c @@ -0,0 +1,5867 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/lockdep.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <net/mac80211.h> + +#include "common.h" + +int +_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout) +{ + const int interval = 10; /* microseconds */ + int t = 0; + + do { + if ((_il_rd(il, addr) & mask) == (bits & mask)) + return t; + udelay(interval); + t += interval; + } while (t < timeout); + + return -ETIMEDOUT; +} +EXPORT_SYMBOL(_il_poll_bit); + +void +il_set_bit(struct il_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _il_set_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +EXPORT_SYMBOL(il_set_bit); + +void +il_clear_bit(struct il_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _il_clear_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +EXPORT_SYMBOL(il_clear_bit); + +int +_il_grab_nic_access(struct il_priv *il) +{ + int ret; + u32 val; + + /* this bit wakes up the NIC */ + _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). + * + */ + ret = + _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (ret < 0) { + val = _il_rd(il, CSR_GP_CNTRL); + IL_ERR("MAC is in deep sleep!. 
CSR_GP_CNTRL = 0x%08X\n", val); + _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(_il_grab_nic_access); + +int +il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout) +{ + const int interval = 10; /* microseconds */ + int t = 0; + + do { + if ((il_rd(il, addr) & mask) == mask) + return t; + udelay(interval); + t += interval; + } while (t < timeout); + + return -ETIMEDOUT; +} +EXPORT_SYMBOL(il_poll_bit); + +u32 +il_rd_prph(struct il_priv *il, u32 reg) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + _il_grab_nic_access(il); + val = _il_rd_prph(il, reg); + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, reg_flags); + return val; +} +EXPORT_SYMBOL(il_rd_prph); + +void +il_wr_prph(struct il_priv *il, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + if (!_il_grab_nic_access(il)) { + _il_wr_prph(il, addr, val); + _il_release_nic_access(il); + } + spin_unlock_irqrestore(&il->reg_lock, reg_flags); +} +EXPORT_SYMBOL(il_wr_prph); + +u32 +il_read_targ_mem(struct il_priv *il, u32 addr) +{ + unsigned long reg_flags; + u32 value; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + _il_grab_nic_access(il); + + _il_wr(il, HBUS_TARG_MEM_RADDR, addr); + rmb(); + value = _il_rd(il, HBUS_TARG_MEM_RDAT); + + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, reg_flags); + return value; +} +EXPORT_SYMBOL(il_read_targ_mem); + +void +il_write_targ_mem(struct il_priv *il, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + if (!_il_grab_nic_access(il)) { + _il_wr(il, HBUS_TARG_MEM_WADDR, addr); + wmb(); + _il_wr(il, HBUS_TARG_MEM_WDAT, val); + _il_release_nic_access(il); + } + spin_unlock_irqrestore(&il->reg_lock, reg_flags); +} +EXPORT_SYMBOL(il_write_targ_mem); + +const char * +il_get_cmd_string(u8 cmd) +{ + switch (cmd) { + IL_CMD(N_ALIVE); + IL_CMD(N_ERROR); + IL_CMD(C_RXON); + IL_CMD(C_RXON_ASSOC); + IL_CMD(C_QOS_PARAM); + IL_CMD(C_RXON_TIMING); + IL_CMD(C_ADD_STA); + IL_CMD(C_REM_STA); + IL_CMD(C_WEPKEY); + IL_CMD(N_3945_RX); + IL_CMD(C_TX); + IL_CMD(C_RATE_SCALE); + IL_CMD(C_LEDS); + IL_CMD(C_TX_LINK_QUALITY_CMD); + IL_CMD(C_CHANNEL_SWITCH); + IL_CMD(N_CHANNEL_SWITCH); + IL_CMD(C_SPECTRUM_MEASUREMENT); + IL_CMD(N_SPECTRUM_MEASUREMENT); + IL_CMD(C_POWER_TBL); + IL_CMD(N_PM_SLEEP); + IL_CMD(N_PM_DEBUG_STATS); + IL_CMD(C_SCAN); + IL_CMD(C_SCAN_ABORT); + IL_CMD(N_SCAN_START); + IL_CMD(N_SCAN_RESULTS); + IL_CMD(N_SCAN_COMPLETE); + IL_CMD(N_BEACON); + IL_CMD(C_TX_BEACON); + IL_CMD(C_TX_PWR_TBL); + IL_CMD(C_BT_CONFIG); + IL_CMD(C_STATS); + IL_CMD(N_STATS); + IL_CMD(N_CARD_STATE); + IL_CMD(N_MISSED_BEACONS); + IL_CMD(C_CT_KILL_CONFIG); + IL_CMD(C_SENSITIVITY); + IL_CMD(C_PHY_CALIBRATION); + IL_CMD(N_RX_PHY); + IL_CMD(N_RX_MPDU); + IL_CMD(N_RX); + IL_CMD(N_COMPRESSED_BA); + default: + return "UNKNOWN"; + + } +} +EXPORT_SYMBOL(il_get_cmd_string); + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +static void +il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd, + struct il_rx_pkt *pkt) +{ + if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { + IL_ERR("Bad return from %s (0x%08X)\n", + il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + return; + } +#ifdef CONFIG_IWLEGACY_DEBUG + switch (cmd->hdr.cmd) { + case C_TX_LINK_QUALITY_CMD: + case C_SENSITIVITY: + D_HC_DUMP("back from %s (0x%08X)\n", + il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + break; + default: + 
D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), + pkt->hdr.flags); + } +#endif +} + +static int +il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->flags & CMD_WANT_SKB); + + /* Assign a generic callback if one is not provided */ + if (!cmd->callback) + cmd->callback = il_generic_cmd_callback; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return -EBUSY; + + ret = il_enqueue_hcmd(il, cmd); + if (ret < 0) { + IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", + il_get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int +il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd) +{ + int cmd_idx; + int ret; + + lockdep_assert_held(&il->mutex); + + BUG_ON(cmd->flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. */ + BUG_ON(cmd->callback); + + D_INFO("Attempting to send sync command %s\n", + il_get_cmd_string(cmd->id)); + + set_bit(S_HCMD_ACTIVE, &il->status); + D_INFO("Setting HCMD_ACTIVE for command %s\n", + il_get_cmd_string(cmd->id)); + + cmd_idx = il_enqueue_hcmd(il, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", + il_get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_timeout(il->wait_command_queue, + !test_bit(S_HCMD_ACTIVE, &il->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(S_HCMD_ACTIVE, &il->status)) { + IL_ERR("Error sending %s: time out after %dms.\n", + il_get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(S_HCMD_ACTIVE, &il->status); + D_INFO("Clearing HCMD_ACTIVE for command %s\n", + il_get_cmd_string(cmd->id)); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(S_RF_KILL_HW, &il->status)) { + IL_ERR("Command %s aborted: RF KILL Switch\n", + il_get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(S_FW_ERROR, &il->status)) { + IL_ERR("Command %s failed: FW Error\n", + il_get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { + IL_ERR("Error: Response NULL in '%s'\n", + il_get_cmd_string(cmd->id)); + ret = -EIO; + goto cancel; + } + + ret = 0; + goto out; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
+ */ + il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; + } +fail: + if (cmd->reply_page) { + il_free_pages(il, cmd->reply_page); + cmd->reply_page = 0; + } +out: + return ret; +} +EXPORT_SYMBOL(il_send_cmd_sync); + +int +il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) +{ + if (cmd->flags & CMD_ASYNC) + return il_send_cmd_async(il, cmd); + + return il_send_cmd_sync(il, cmd); +} +EXPORT_SYMBOL(il_send_cmd); + +int +il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) +{ + struct il_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return il_send_cmd_sync(il, &cmd); +} +EXPORT_SYMBOL(il_send_cmd_pdu); + +int +il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, + void (*callback) (struct il_priv *il, + struct il_device_cmd *cmd, + struct il_rx_pkt *pkt)) +{ + struct il_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + cmd.flags |= CMD_ASYNC; + cmd.callback = callback; + + return il_send_cmd_async(il, &cmd); +} +EXPORT_SYMBOL(il_send_cmd_pdu_async); + +/* default: IL_LED_BLINK(0) using blinking idx table */ +static int led_mode; +module_param(led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, + "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); + +/* Throughput OFF time(ms) ON time (ms) + * >300 25 25 + * >200 to 300 40 40 + * >100 to 200 55 55 + * >70 to 100 65 65 + * >50 to 70 75 75 + * >20 to 50 85 85 + * >10 to 20 95 95 + * >5 to 10 110 110 + * >1 to 5 130 130 + * >0 to 1 167 167 + * <=0 SOLID ON + */ +static const struct ieee80211_tpt_blink il_blink[] = { + {.throughput = 0, .blink_time = 334}, + {.throughput = 1 * 1024 - 1, .blink_time = 260}, + {.throughput = 5 * 1024 - 1, .blink_time = 220}, + {.throughput = 10 * 1024 - 1, .blink_time = 190}, + {.throughput = 20 * 1024 - 1, .blink_time = 170}, + {.throughput = 50 * 1024 - 1, .blink_time = 150}, + {.throughput = 70 * 1024 - 1, .blink_time = 130}, + {.throughput = 100 * 1024 - 1, .blink_time = 110}, + {.throughput = 200 * 1024 - 1, .blink_time = 80}, + {.throughput = 300 * 1024 - 1, .blink_time = 50}, +}; + +/* + * Adjust led blink rate to compensate on a MAC Clock difference on every HW + * Led blink rate analysis showed an average deviation of 0% on 3945, + * 5% on 4965 HW. 
+ * Need to compensate on the led on/off time per HW according to the deviation + * to achieve the desired led frequency + * The calculation is: (100-averageDeviation)/100 * blinkTime + * For code efficiency the calculation will be: + * compensation = (100 - averageDeviation) * 64 / 100 + * NewBlinkTime = (compensation * BlinkTime) / 64 + */ +static inline u8 +il_blink_compensation(struct il_priv *il, u8 time, u16 compensation) +{ + if (!compensation) { + IL_ERR("undefined blink compensation: " + "use pre-defined blinking time\n"); + return time; + } + + return (u8) ((time * compensation) >> 6); +} + +/* Set led pattern command */ +static int +il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off) +{ + struct il_led_cmd led_cmd = { + .id = IL_LED_LINK, + .interval = IL_DEF_LED_INTRVL + }; + int ret; + + if (!test_bit(S_READY, &il->status)) + return -EBUSY; + + if (il->blink_on == on && il->blink_off == off) + return 0; + + if (off == 0) { + /* led is SOLID_ON */ + on = IL_LED_SOLID; + } + + D_LED("Led blink time compensation=%u\n", + il->cfg->base_params->led_compensation); + led_cmd.on = + il_blink_compensation(il, on, + il->cfg->base_params->led_compensation); + led_cmd.off = + il_blink_compensation(il, off, + il->cfg->base_params->led_compensation); + + ret = il->cfg->ops->led->cmd(il, &led_cmd); + if (!ret) { + il->blink_on = on; + il->blink_off = off; + } + return ret; +} + +static void +il_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct il_priv *il = container_of(led_cdev, struct il_priv, led); + unsigned long on = 0; + + if (brightness > 0) + on = IL_LED_SOLID; + + il_led_cmd(il, on, 0); +} + +static int +il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, + unsigned long *delay_off) +{ + struct il_priv *il = container_of(led_cdev, struct il_priv, led); + + return il_led_cmd(il, *delay_on, *delay_off); +} + +void +il_leds_init(struct il_priv *il) +{ + int mode = led_mode; + int ret; + + if (mode == IL_LED_DEFAULT) + mode = il->cfg->led_mode; + + il->led.name = + kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy)); + il->led.brightness_set = il_led_brightness_set; + il->led.blink_set = il_led_blink_set; + il->led.max_brightness = 1; + + switch (mode) { + case IL_LED_DEFAULT: + WARN_ON(1); + break; + case IL_LED_BLINK: + il->led.default_trigger = + ieee80211_create_tpt_led_trigger(il->hw, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED, + il_blink, + ARRAY_SIZE(il_blink)); + break; + case IL_LED_RF_STATE: + il->led.default_trigger = ieee80211_get_radio_led_name(il->hw); + break; + } + + ret = led_classdev_register(&il->pci_dev->dev, &il->led); + if (ret) { + kfree(il->led.name); + return; + } + + il->led_registered = true; +} +EXPORT_SYMBOL(il_leds_init); + +void +il_leds_exit(struct il_priv *il) +{ + if (!il->led_registered) + return; + + led_classdev_unregister(&il->led); + kfree(il->led.name); +} +EXPORT_SYMBOL(il_leds_exit); + +/************************** EEPROM BANDS **************************** + * + * The il_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, il_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. 
+ * + * During init, we copy the eeprom information and channel map + * information into il->channel_info_24/52 and il->channel_map_24/52 + * + * channel_map_24/52 provides the idx in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. + * + *********************************************************************/ + +/* 2.4 GHz */ +const u8 il_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */ + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +static int +il_eeprom_verify_signature(struct il_priv *il) +{ + u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + int ret = 0; + + D_EEPROM("EEPROM signature=0x%08x\n", gp); + switch (gp) { + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + break; + default: + IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + break; + } + return ret; +} + +const u8 * +il_eeprom_query_addr(const struct il_priv *il, size_t offset) +{ + BUG_ON(offset >= il->cfg->base_params->eeprom_size); + return &il->eeprom[offset]; +} +EXPORT_SYMBOL(il_eeprom_query_addr); + +u16 +il_eeprom_query16(const struct il_priv *il, size_t offset) +{ + if (!il->eeprom) + return 0; + return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8); +} +EXPORT_SYMBOL(il_eeprom_query16); + +/** + * il_eeprom_init - read EEPROM contents + * + * Load the EEPROM contents from adapter into il->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
+ */ +int +il_eeprom_init(struct il_priv *il) +{ + __le16 *e; + u32 gp = _il_rd(il, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + + /* allocate eeprom */ + sz = il->cfg->base_params->eeprom_size; + D_EEPROM("NVM size = %d\n", sz); + il->eeprom = kzalloc(sz, GFP_KERNEL); + if (!il->eeprom) { + ret = -ENOMEM; + goto alloc_err; + } + e = (__le16 *) il->eeprom; + + il->cfg->ops->lib->apm_ops.init(il); + + ret = il_eeprom_verify_signature(il); + if (ret < 0) { + IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + goto err; + } + + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il); + if (ret < 0) { + IL_ERR("Failed to acquire EEPROM semaphore.\n"); + ret = -ENOENT; + goto err; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + _il_wr(il, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = + _il_poll_bit(il, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IL_ERR("Time out reading EEPROM[%d]\n", addr); + goto done; + } + r = _il_rd(il, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + + D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM", + il_eeprom_query16(il, EEPROM_VERSION)); + + ret = 0; +done: + il->cfg->ops->lib->eeprom_ops.release_semaphore(il); + +err: + if (ret) + il_eeprom_free(il); + /* Reset chip to save power until we load uCode during "up". */ + il_apm_stop(il); +alloc_err: + return ret; +} +EXPORT_SYMBOL(il_eeprom_init); + +void +il_eeprom_free(struct il_priv *il) +{ + kfree(il->eeprom); + il->eeprom = NULL; +} +EXPORT_SYMBOL(il_eeprom_free); + +static void +il_init_band_reference(const struct il_priv *il, int eep_band, + int *eeprom_ch_count, + const struct il_eeprom_channel **eeprom_ch_info, + const u8 **eeprom_ch_idx) +{ + u32 offset = + il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1]; + switch (eep_band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_1; + break; + case 2: /* 4.9GHz band */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_3; + break; + case 4: /* 5.5GHz band */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_4; + break; + case 5: /* 5.7GHz band */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_5; + break; + case 6: /* 2.4GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_6; + break; + case 7: /* 5 GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7); + *eeprom_ch_info = + (struct il_eeprom_channel *)il_eeprom_query_addr(il, + offset); + *eeprom_ch_idx = il_eeprom_band_7; + break; + default: + BUG(); + } +} 
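An editorial aside on the EEPROM read path shown above (not part of the commit): il_eeprom_init() fetches one 16-bit word per CSR transaction, keeping the upper half of the register readback, and il_eeprom_query16() later reassembles words from the byte buffer little-endian. A small stand-alone sketch of that byte handling, with a stub buffer in place of the real CSR access:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for il->eeprom: bytes as stored by il_eeprom_init(), i.e. each
 * 16-bit word laid down little-endian (cpu_to_le16(r >> 16) in the driver). */
static uint8_t eeprom[4] = { 0x34, 0x12, 0x78, 0x56 };

/* Same byte math as il_eeprom_query16(): low byte first, then high byte. */
static uint16_t query16(const uint8_t *e, size_t offset)
{
	return (uint16_t)e[offset] | ((uint16_t)e[offset + 1] << 8);
}

int main(void)
{
	uint32_t r = 0x1234abcd;             /* pretend CSR_EEPROM_REG readback */
	uint16_t word = (uint16_t)(r >> 16); /* driver keeps the upper half */

	printf("word from register: 0x%04x\n", word);   /* 0x1234 */
	printf("word 0: 0x%04x\n", query16(eeprom, 0)); /* 0x1234 */
	printf("word 1: 0x%04x\n", query16(eeprom, 2)); /* 0x5678 */
	return 0;
}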
+ +#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") +/** + * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il. + * + * Does not set up a command, or touch hardware. + */ +static int +il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel, + const struct il_eeprom_channel *eeprom_ch, + u8 clear_ht40_extension_channel) +{ + struct il_channel_info *ch_info; + + ch_info = + (struct il_channel_info *)il_get_channel_info(il, band, channel); + + if (!il_is_channel_valid(ch_info)) + return -1; + + D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", ch_info->channel, + il_is_channel_a_band(ch_info) ? "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(DFS), eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && + !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); + + ch_info->ht40_eeprom = *eeprom_ch; + ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; + ch_info->ht40_flags = eeprom_ch->flags; + if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) + ch_info->ht40_extension_channel &= + ~clear_ht40_extension_channel; + + return 0; +} + +#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +/** + * il_init_channel_map - Set up driver's info for all possible channels + */ +int +il_init_channel_map(struct il_priv *il) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_idx = NULL; + const struct il_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct il_channel_info *ch_info; + + if (il->channel_count) { + D_EEPROM("Channel map already initialized.\n"); + return 0; + } + + D_EEPROM("Initializing regulatory info from EEPROM\n"); + + il->channel_count = + ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) + + ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) + + ARRAY_SIZE(il_eeprom_band_5); + + D_EEPROM("Parsing data for %d channels.\n", il->channel_count); + + il->channel_info = + kzalloc(sizeof(struct il_channel_info) * il->channel_count, + GFP_KERNEL); + if (!il->channel_info) { + IL_ERR("Could not allocate channel_info\n"); + il->channel_count = 0; + return -ENOMEM; + } + + ch_info = il->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + il_init_band_reference(il, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_idx); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_idx[ch]; + ch_info->band = + (band == + 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + /* First write that ht40 is not enabled, and then enable + * one by one */ + ch_info->ht40_extension_channel = + IEEE80211_CHAN_NO_HT40; + + if (!(il_is_channel_valid(ch_info))) { + D_EEPROM("Ch. %d Flags %x [%sGHz] - " + "No traffic\n", ch_info->channel, + ch_info->flags, + il_is_channel_a_band(ch_info) ? 
"5.2" : + "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", ch_info->channel, + il_is_channel_a_band(ch_info) ? "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) && + !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) ? "" : + "not "); + + ch_info++; + } + } + + /* Check if we do have HT40 channels */ + if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == + EEPROM_REGULATORY_BAND_NO_HT40 && + il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == + EEPROM_REGULATORY_BAND_NO_HT40) + return 0; + + /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ + for (band = 6; band <= 7; band++) { + enum ieee80211_band ieeeband; + + il_init_band_reference(il, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_idx); + + /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ + ieeeband = + (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + /* Set up driver's info for lower half */ + il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch], + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40PLUS); + + /* Set up driver's info for upper half */ + il_mod_ht40_chan_info(il, ieeeband, + eeprom_ch_idx[ch] + 4, + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40MINUS); + } + } + + return 0; +} +EXPORT_SYMBOL(il_init_channel_map); + +/* + * il_free_channel_map - undo allocations in il_init_channel_map + */ +void +il_free_channel_map(struct il_priv *il) +{ + kfree(il->channel_info); + il->channel_count = 0; +} +EXPORT_SYMBOL(il_free_channel_map); + +/** + * il_get_channel_info - Find driver's ilate channel info + * + * Based on band and channel number. + */ +const struct il_channel_info * +il_get_channel_info(const struct il_priv *il, enum ieee80211_band band, + u16 channel) +{ + int i; + + switch (band) { + case IEEE80211_BAND_5GHZ: + for (i = 14; i < il->channel_count; i++) { + if (il->channel_info[i].channel == channel) + return &il->channel_info[i]; + } + break; + case IEEE80211_BAND_2GHZ: + if (channel >= 1 && channel <= 14) + return &il->channel_info[channel - 1]; + break; + default: + BUG(); + } + + return NULL; +} +EXPORT_SYMBOL(il_get_channel_info); + +/* + * Setting power level allows the card to go to sleep when not busy. + * + * We calculate a sleep command based on the required latency, which + * we get from mac80211. In order to handle thermal throttling, we can + * also use pre-defined power levels. + */ + +/* + * This defines the old power levels. 
They are still used by default + * (level 1) and for thermal throttle (levels 3 through 5) + */ + +struct il_power_vec_entry { + struct il_powertable_cmd cmd; + u8 no_dtim; /* number of skip dtim */ +}; + +static void +il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + + if (il->power_data.pci_pm) + cmd->flags |= IL_POWER_PCI_PM_MSK; + + D_POWER("Sleep command for CAM\n"); +} + +static int +il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd) +{ + D_POWER("Sending power/sleep command\n"); + D_POWER("Flags value = 0x%08X\n", cmd->flags); + D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return il_send_cmd_pdu(il, C_POWER_TBL, + sizeof(struct il_powertable_cmd), cmd); +} + +int +il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) +{ + int ret; + bool update_chains; + + lockdep_assert_held(&il->mutex); + + /* Don't update the RX chain when chain noise calibration is running */ + update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE || + il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE; + + if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) + return 0; + + if (!il_is_ready_rf(il)) + return -EIO; + + /* scan complete use sleep_power_next, need to be updated */ + memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); + if (test_bit(S_SCANNING, &il->status) && !force) { + D_INFO("Defer power set mode while scanning\n"); + return 0; + } + + if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK) + set_bit(S_POWER_PMI, &il->status); + + ret = il_set_power(il, cmd); + if (!ret) { + if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)) + clear_bit(S_POWER_PMI, &il->status); + + if (il->cfg->ops->lib->update_chain_flags && update_chains) + il->cfg->ops->lib->update_chain_flags(il); + else if (il->cfg->ops->lib->update_chain_flags) + D_POWER("Cannot update the power, chain noise " + "calibration running: %d\n", + il->chain_noise_data.state); + + memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)); + } else + IL_ERR("set power fail, ret = %d", ret); + + return ret; +} + +int +il_power_update_mode(struct il_priv *il, bool force) +{ + struct il_powertable_cmd cmd; + + il_power_sleep_cam_cmd(il, &cmd); + return il_power_set_mode(il, &cmd, force); +} +EXPORT_SYMBOL(il_power_update_mode); + +/* initialize to default */ +void +il_power_initialize(struct il_priv *il) +{ + u16 lctl = il_pcie_link_ctl(il); + + il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); + + il->power_data.debug_sleep_level_override = -1; + + memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd)); +} +EXPORT_SYMBOL(il_power_initialize); + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. */ +#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ +#define IL_ACTIVE_DWELL_TIME_52 (20) + +#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3) +#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2) + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. 
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IL_PASSIVE_DWELL_TIME_52 (10) +#define IL_PASSIVE_DWELL_BASE (100) +#define IL_CHANNEL_TUNE_TIME 5 + +static int +il_send_scan_abort(struct il_priv *il) +{ + int ret; + struct il_rx_pkt *pkt; + struct il_host_cmd cmd = { + .id = C_SCAN_ABORT, + .flags = CMD_WANT_SKB, + }; + + /* Exit instantly with error when device is not ready + * to receive scan abort command or it does not perform + * hardware scan currently */ + if (!test_bit(S_READY, &il->status) || + !test_bit(S_GEO_CONFIGURED, &il->status) || + !test_bit(S_SCAN_HW, &il->status) || + test_bit(S_FW_ERROR, &il->status) || + test_bit(S_EXIT_PENDING, &il->status)) + return -EIO; + + ret = il_send_cmd_sync(il, &cmd); + if (ret) + return ret; + + pkt = (struct il_rx_pkt *)cmd.reply_page; + if (pkt->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status); + ret = -EIO; + } + + il_free_pages(il, cmd.reply_page); + return ret; +} + +static void +il_complete_scan(struct il_priv *il, bool aborted) +{ + /* check if scan was requested from mac80211 */ + if (il->scan_request) { + D_SCAN("Complete scan in mac80211\n"); + ieee80211_scan_completed(il->hw, aborted); + } + + il->scan_vif = NULL; + il->scan_request = NULL; +} + +void +il_force_scan_end(struct il_priv *il) +{ + lockdep_assert_held(&il->mutex); + + if (!test_bit(S_SCANNING, &il->status)) { + D_SCAN("Forcing scan end while not scanning\n"); + return; + } + + D_SCAN("Forcing scan end\n"); + clear_bit(S_SCANNING, &il->status); + clear_bit(S_SCAN_HW, &il->status); + clear_bit(S_SCAN_ABORTING, &il->status); + il_complete_scan(il, true); +} + +static void +il_do_scan_abort(struct il_priv *il) +{ + int ret; + + lockdep_assert_held(&il->mutex); + + if (!test_bit(S_SCANNING, &il->status)) { + D_SCAN("Not performing scan to abort\n"); + return; + } + + if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) { + D_SCAN("Scan abort in progress\n"); + return; + } + + ret = il_send_scan_abort(il); + if (ret) { + D_SCAN("Send scan abort failed %d\n", ret); + il_force_scan_end(il); + } else + D_SCAN("Successfully send scan abort\n"); +} + +/** + * il_scan_cancel - Cancel any currently executing HW scan + */ +int +il_scan_cancel(struct il_priv *il) +{ + D_SCAN("Queuing abort scan\n"); + queue_work(il->workqueue, &il->abort_scan); + return 0; +} +EXPORT_SYMBOL(il_scan_cancel); + +/** + * il_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + */ +int +il_scan_cancel_timeout(struct il_priv *il, unsigned long ms) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(ms); + + lockdep_assert_held(&il->mutex); + + D_SCAN("Scan cancel timeout\n"); + + il_do_scan_abort(il); + + while (time_before_eq(jiffies, timeout)) { + if (!test_bit(S_SCAN_HW, &il->status)) + break; + msleep(20); + } + + return test_bit(S_SCAN_HW, &il->status); +} +EXPORT_SYMBOL(il_scan_cancel_timeout); + +/* Service response to C_SCAN (0x80) */ +static void +il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb) +{ +#ifdef CONFIG_IWLEGACY_DEBUG + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_scanreq_notification *notif = + 
(struct il_scanreq_notification *)pkt->u.raw; + + D_SCAN("Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service N_SCAN_START (0x82) */ +static void +il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_scanstart_notification *notif = + (struct il_scanstart_notification *)pkt->u.raw; + il->scan_start_tsf = le32_to_cpu(notif->tsf_low); + D_SCAN("Scan start: " "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel, + notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); +} + +/* Service N_SCAN_RESULTS (0x83) */ +static void +il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) +{ +#ifdef CONFIG_IWLEGACY_DEBUG + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_scanresults_notification *notif = + (struct il_scanresults_notification *)pkt->u.raw; + + D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->stats[0]), + le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); +#endif +} + +/* Service N_SCAN_COMPLETE (0x84) */ +static void +il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) +{ + +#ifdef CONFIG_IWLEGACY_DEBUG + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; +#endif + + D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(S_SCAN_HW, &il->status); + + D_SCAN("Scan on %sGHz took %dms\n", + (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", + jiffies_to_msecs(jiffies - il->scan_start)); + + queue_work(il->workqueue, &il->scan_completed); +} + +void +il_setup_rx_scan_handlers(struct il_priv *il) +{ + /* scan handlers */ + il->handlers[C_SCAN] = il_hdl_scan; + il->handlers[N_SCAN_START] = il_hdl_scan_start; + il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; + il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; +} +EXPORT_SYMBOL(il_setup_rx_scan_handlers); + +inline u16 +il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, + u8 n_probes) +{ + if (band == IEEE80211_BAND_5GHZ) + return IL_ACTIVE_DWELL_TIME_52 + + IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); + else + return IL_ACTIVE_DWELL_TIME_24 + + IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); +} +EXPORT_SYMBOL(il_get_active_dwell_time); + +u16 +il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + struct il_rxon_context *ctx = &il->ctx; + u16 value; + + u16 passive = + (band == + IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + + IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + + IL_PASSIVE_DWELL_TIME_52; + + if (il_is_any_associated(il)) { + /* + * If we're associated, we clamp the maximum passive + * dwell time to be 98% of the smallest beacon interval + * (minus 2 * channel tune time) + */ + value = ctx->vif ? 
ctx->vif->bss_conf.beacon_int : 0; + if (value > IL_PASSIVE_DWELL_BASE || !value) + value = IL_PASSIVE_DWELL_BASE; + value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2; + passive = min(value, passive); + } + + return passive; +} +EXPORT_SYMBOL(il_get_passive_dwell_time); + +void +il_init_scan_params(struct il_priv *il) +{ + u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; + if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ]) + il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; + if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ]) + il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; +} +EXPORT_SYMBOL(il_init_scan_params); + +static int +il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif) +{ + int ret; + + lockdep_assert_held(&il->mutex); + + if (WARN_ON(!il->cfg->ops->utils->request_scan)) + return -EOPNOTSUPP; + + cancel_delayed_work(&il->scan_check); + + if (!il_is_ready_rf(il)) { + IL_WARN("Request scan called when driver not ready.\n"); + return -EIO; + } + + if (test_bit(S_SCAN_HW, &il->status)) { + D_SCAN("Multiple concurrent scan requests in parallel.\n"); + return -EBUSY; + } + + if (test_bit(S_SCAN_ABORTING, &il->status)) { + D_SCAN("Scan request while abort pending.\n"); + return -EBUSY; + } + + D_SCAN("Starting scan...\n"); + + set_bit(S_SCANNING, &il->status); + il->scan_start = jiffies; + + ret = il->cfg->ops->utils->request_scan(il, vif); + if (ret) { + clear_bit(S_SCANNING, &il->status); + return ret; + } + + queue_delayed_work(il->workqueue, &il->scan_check, + IL_SCAN_CHECK_WATCHDOG); + + return 0; +} + +int +il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct il_priv *il = hw->priv; + int ret; + + D_MAC80211("enter\n"); + + if (req->n_channels == 0) + return -EINVAL; + + mutex_lock(&il->mutex); + + if (test_bit(S_SCANNING, &il->status)) { + D_SCAN("Scan already in progress.\n"); + ret = -EAGAIN; + goto out_unlock; + } + + /* mac80211 will only ask for one band at a time */ + il->scan_request = req; + il->scan_vif = vif; + il->scan_band = req->channels[0]->band; + + ret = il_scan_initiate(il, vif); + + D_MAC80211("leave\n"); + +out_unlock: + mutex_unlock(&il->mutex); + + return ret; +} +EXPORT_SYMBOL(il_mac_hw_scan); + +static void +il_bg_scan_check(struct work_struct *data) +{ + struct il_priv *il = + container_of(data, struct il_priv, scan_check.work); + + D_SCAN("Scan check work\n"); + + /* Since we are here firmware does not finish scan and + * most likely is in bad shape, so we don't bother to + * send abort command, just force scan complete to mac80211 */ + mutex_lock(&il->mutex); + il_force_scan_end(il); + mutex_unlock(&il->mutex); +} + +/** + * il_fill_probe_req - fill in all required fields and IE for probe request + */ + +u16 +il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ies, int ie_len, int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, il_bcast_addr, ETH_ALEN); + memcpy(frame->sa, ta, ETH_ALEN); + memcpy(frame->bssid, il_bcast_addr, ETH_ALEN); + frame->seq_ctrl = 0; + + len += 24; + + /* ...next IE... 
*/ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our indirect SSID IE */ + left -= 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + len += 2; + + if (WARN_ON(left < ie_len)) + return len; + + if (ies && ie_len) { + memcpy(pos, ies, ie_len); + len += ie_len; + } + + return (u16) len; +} +EXPORT_SYMBOL(il_fill_probe_req); + +static void +il_bg_abort_scan(struct work_struct *work) +{ + struct il_priv *il = container_of(work, struct il_priv, abort_scan); + + D_SCAN("Abort scan work\n"); + + /* We keep scan_check work queued in case when firmware will not + * report back scan completed notification */ + mutex_lock(&il->mutex); + il_scan_cancel_timeout(il, 200); + mutex_unlock(&il->mutex); +} + +static void +il_bg_scan_completed(struct work_struct *work) +{ + struct il_priv *il = container_of(work, struct il_priv, scan_completed); + bool aborted; + + D_SCAN("Completed scan.\n"); + + cancel_delayed_work(&il->scan_check); + + mutex_lock(&il->mutex); + + aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status); + if (aborted) + D_SCAN("Aborted scan completed.\n"); + + if (!test_and_clear_bit(S_SCANNING, &il->status)) { + D_SCAN("Scan already completed.\n"); + goto out_settings; + } + + il_complete_scan(il, aborted); + +out_settings: + /* Can we still talk to firmware ? */ + if (!il_is_ready_rf(il)) + goto out; + + /* + * We do not commit power settings while scan is pending, + * do it now if the settings changed. + */ + il_power_set_mode(il, &il->power_data.sleep_cmd_next, false); + il_set_tx_power(il, il->tx_power_next, false); + + il->cfg->ops->utils->post_scan(il); + +out: + mutex_unlock(&il->mutex); +} + +void +il_setup_scan_deferred_work(struct il_priv *il) +{ + INIT_WORK(&il->scan_completed, il_bg_scan_completed); + INIT_WORK(&il->abort_scan, il_bg_abort_scan); + INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check); +} +EXPORT_SYMBOL(il_setup_scan_deferred_work); + +void +il_cancel_scan_deferred_work(struct il_priv *il) +{ + cancel_work_sync(&il->abort_scan); + cancel_work_sync(&il->scan_completed); + + if (cancel_delayed_work_sync(&il->scan_check)) { + mutex_lock(&il->mutex); + il_force_scan_end(il); + mutex_unlock(&il->mutex); + } +} +EXPORT_SYMBOL(il_cancel_scan_deferred_work); + +/* il->sta_lock must be held */ +static void +il_sta_ucode_activate(struct il_priv *il, u8 sta_id) +{ + + if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) + IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n", + sta_id, il->stations[sta_id].sta.sta.addr); + + if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) { + D_ASSOC("STA id %u addr %pM already present" + " in uCode (according to driver)\n", sta_id, + il->stations[sta_id].sta.sta.addr); + } else { + il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE; + D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id, + il->stations[sta_id].sta.sta.addr); + } +} + +static int +il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta, + struct il_rx_pkt *pkt, bool sync) +{ + u8 sta_id = addsta->sta.sta_id; + unsigned long flags; + int ret = -EIO; + + if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { + IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags); + return ret; + } + + D_INFO("Processing response for adding station %u\n", sta_id); + + spin_lock_irqsave(&il->sta_lock, flags); + + switch (pkt->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + D_INFO("C_ADD_STA PASSED\n"); + il_sta_ucode_activate(il, sta_id); + ret = 0; + break; + case ADD_STA_NO_ROOM_IN_TBL: + IL_ERR("Adding station %d failed, 
no room in table.\n", sta_id); + break; + case ADD_STA_NO_BLOCK_ACK_RESOURCE: + IL_ERR("Adding station %d failed, no block ack resource.\n", + sta_id); + break; + case ADD_STA_MODIFY_NON_EXIST_STA: + IL_ERR("Attempting to modify non-existing station %d\n", + sta_id); + break; + default: + D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); + break; + } + + D_INFO("%s station id %u addr %pM\n", + il->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, + il->stations[sta_id].sta.sta.addr); + + /* + * XXX: The MAC address in the command buffer is often changed from + * the original sent to the device. That is, the MAC address + * written to the command buffer often is not the same MAC address + * read from the command buffer when the command returns. This + * issue has not yet been resolved and this debugging is left to + * observe the problem. + */ + D_INFO("%s station according to cmd buffer %pM\n", + il->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); + spin_unlock_irqrestore(&il->sta_lock, flags); + + return ret; +} + +static void +il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, + struct il_rx_pkt *pkt) +{ + struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; + + il_process_add_sta_resp(il, addsta, pkt, false); + +} + +int +il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) +{ + struct il_rx_pkt *pkt = NULL; + int ret = 0; + u8 data[sizeof(*sta)]; + struct il_host_cmd cmd = { + .id = C_ADD_STA, + .flags = flags, + .data = data, + }; + u8 sta_id __maybe_unused = sta->sta.sta_id; + + D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, + flags & CMD_ASYNC ? "a" : ""); + + if (flags & CMD_ASYNC) + cmd.callback = il_add_sta_callback; + else { + cmd.flags |= CMD_WANT_SKB; + might_sleep(); + } + + cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data); + ret = il_send_cmd(il, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + if (ret == 0) { + pkt = (struct il_rx_pkt *)cmd.reply_page; + ret = il_process_add_sta_resp(il, sta, pkt, true); + } + il_free_pages(il, cmd.reply_page); + + return ret; +} +EXPORT_SYMBOL(il_send_add_sta); + +static void +il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta, + struct il_rxon_context *ctx) +{ + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + __le32 sta_flags; + u8 mimo_ps_mode; + + if (!sta || !sta_ht_inf->ht_supported) + goto done; + + mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; + D_ASSOC("spatial multiplexing power save mode: %s\n", + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" : + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
"dynamic" : + "disabled"); + + sta_flags = il->stations[idx].sta.station_flags; + + sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); + + switch (mimo_ps_mode) { + case WLAN_HT_CAP_SM_PS_STATIC: + sta_flags |= STA_FLG_MIMO_DIS_MSK; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + break; + default: + IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode); + break; + } + + sta_flags |= + cpu_to_le32((u32) sta_ht_inf-> + ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= + cpu_to_le32((u32) sta_ht_inf-> + ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap)) + sta_flags |= STA_FLG_HT40_EN_MSK; + else + sta_flags &= ~STA_FLG_HT40_EN_MSK; + + il->stations[idx].sta.station_flags = sta_flags; +done: + return; +} + +/** + * il_prep_station - Prepare station information for addition + * + * should be called with sta_lock held + */ +u8 +il_prep_station(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta) +{ + struct il_station_entry *station; + int i; + u8 sta_id = IL_INVALID_STATION; + u16 rate; + + if (is_ap) + sta_id = ctx->ap_sta_id; + else if (is_broadcast_ether_addr(addr)) + sta_id = ctx->bcast_sta_id; + else + for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { + if (!compare_ether_addr + (il->stations[i].sta.sta.addr, addr)) { + sta_id = i; + break; + } + + if (!il->stations[i].used && + sta_id == IL_INVALID_STATION) + sta_id = i; + } + + /* + * These two conditions have the same outcome, but keep them + * separate + */ + if (unlikely(sta_id == IL_INVALID_STATION)) + return sta_id; + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { + D_INFO("STA %d already in process of being added.\n", sta_id); + return sta_id; + } + + if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && + (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && + !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) { + D_ASSOC("STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + return sta_id; + } + + station = &il->stations[sta_id]; + station->used = IL_STA_DRIVER_ACTIVE; + D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr); + il->num_stations++; + + /* Set up the C_ADD_STA command to send to device */ + memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = sta_id; + station->sta.station_flags = ctx->station_flags; + station->ctxid = ctx->ctxid; + + if (sta) { + struct il_station_priv_common *sta_priv; + + sta_priv = (void *)sta->drv_priv; + sta_priv->ctx = ctx; + } + + /* + * OK to call unconditionally, since local stations (IBSS BSSID + * STA and broadcast STA) pass in a NULL sta, and mac80211 + * doesn't allow HT IBSS. + */ + il_set_ht_add_station(il, sta_id, sta, ctx); + + /* 3945 only */ + rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; + /* Turn on both antennas for the station... 
*/ + station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); + + return sta_id; + +} +EXPORT_SYMBOL_GPL(il_prep_station); + +#define STA_WAIT_TIMEOUT (HZ/2) + +/** + * il_add_station_common - + */ +int +il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta, + u8 *sta_id_r) +{ + unsigned long flags_spin; + int ret = 0; + u8 sta_id; + struct il_addsta_cmd sta_cmd; + + *sta_id_r = 0; + spin_lock_irqsave(&il->sta_lock, flags_spin); + sta_id = il_prep_station(il, ctx, addr, is_ap, sta); + if (sta_id == IL_INVALID_STATION) { + IL_ERR("Unable to prepare station %pM for addition\n", addr); + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + return -EINVAL; + } + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { + D_INFO("STA %d already in process of being added.\n", sta_id); + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + return -EEXIST; + } + + if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && + (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { + D_ASSOC("STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + return -EEXIST; + } + + il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; + memcpy(&sta_cmd, &il->stations[sta_id].sta, + sizeof(struct il_addsta_cmd)); + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + + /* Add station to device's station table */ + ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&il->sta_lock, flags_spin); + IL_ERR("Adding station %pM failed.\n", + il->stations[sta_id].sta.sta.addr); + il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; + il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + } + *sta_id_r = sta_id; + return ret; +} +EXPORT_SYMBOL(il_add_station_common); + +/** + * il_sta_ucode_deactivate - deactivate ucode status for a station + * + * il->sta_lock must be held + */ +static void +il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) +{ + /* Ucode must be active and driver must be non active */ + if ((il->stations[sta_id]. 
+ used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != + IL_STA_UCODE_ACTIVE) + IL_ERR("removed non active STA %u\n", sta_id); + + il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; + + memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); + D_ASSOC("Removed STA %u\n", sta_id); +} + +static int +il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, + bool temporary) +{ + struct il_rx_pkt *pkt; + int ret; + + unsigned long flags_spin; + struct il_rem_sta_cmd rm_sta_cmd; + + struct il_host_cmd cmd = { + .id = C_REM_STA, + .len = sizeof(struct il_rem_sta_cmd), + .flags = CMD_SYNC, + .data = &rm_sta_cmd, + }; + + memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); + rm_sta_cmd.num_sta = 1; + memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); + + cmd.flags |= CMD_WANT_SKB; + + ret = il_send_cmd(il, &cmd); + + if (ret) + return ret; + + pkt = (struct il_rx_pkt *)cmd.reply_page; + if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { + IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); + ret = -EIO; + } + + if (!ret) { + switch (pkt->u.rem_sta.status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_irqsave(&il->sta_lock, flags_spin); + il_sta_ucode_deactivate(il, sta_id); + spin_unlock_irqrestore(&il->sta_lock, + flags_spin); + } + D_ASSOC("C_REM_STA PASSED\n"); + break; + default: + ret = -EIO; + IL_ERR("C_REM_STA failed\n"); + break; + } + } + il_free_pages(il, cmd.reply_page); + + return ret; +} + +/** + * il_remove_station - Remove driver's knowledge of station. + */ +int +il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) +{ + unsigned long flags; + + if (!il_is_ready(il)) { + D_INFO("Unable to remove station %pM, device not ready.\n", + addr); + /* + * It is typical for stations to be removed when we are + * going down. Return success since device will be down + * soon anyway + */ + return 0; + } + + D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); + + if (WARN_ON(sta_id == IL_INVALID_STATION)) + return -EINVAL; + + spin_lock_irqsave(&il->sta_lock, flags); + + if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { + D_INFO("Removing %pM but non DRIVER active\n", addr); + goto out_err; + } + + if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { + D_INFO("Removing %pM but non UCODE active\n", addr); + goto out_err; + } + + if (il->stations[sta_id].used & IL_STA_LOCAL) { + kfree(il->stations[sta_id].lq); + il->stations[sta_id].lq = NULL; + } + + il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; + + il->num_stations--; + + BUG_ON(il->num_stations < 0); + + spin_unlock_irqrestore(&il->sta_lock, flags); + + return il_send_remove_station(il, addr, sta_id, false); +out_err: + spin_unlock_irqrestore(&il->sta_lock, flags); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(il_remove_station); + +/** + * il_clear_ucode_stations - clear ucode station table bits + * + * This function clears all the bits in the driver indicating + * which stations are active in the ucode. Call when something + * other than explicit station management would cause this in + * the ucode, e.g. unassociated RXON. 
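The station bookkeeping in this file tracks each table entry with a small "used" bitmask (driver-active, ucode-active, ucode-add-in-progress), and il_clear_ucode_stations, whose body follows just below, simply strips the ucode-active bit from every entry of a context. A rough, self-contained sketch of that idea, with stand-in DEMO_* flag values rather than the driver's IL_STA_* bits:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag values; the real IL_STA_* bits live in the driver headers. */
#define DEMO_STA_DRIVER_ACTIVE	0x1
#define DEMO_STA_UCODE_ACTIVE	0x2
#define DEMO_MAX_STATIONS	8

struct demo_station { unsigned int used; };

/* Same idea as il_clear_ucode_stations: forget which entries the firmware
 * currently knows about, without touching driver-side state. */
static bool clear_ucode_stations(struct demo_station *sta, int n)
{
        bool cleared = false;
        int i;

        for (i = 0; i < n; i++) {
                if (sta[i].used & DEMO_STA_UCODE_ACTIVE) {
                        sta[i].used &= ~DEMO_STA_UCODE_ACTIVE;
                        cleared = true;
                }
        }
        return cleared;
}

int main(void)
{
        struct demo_station stations[DEMO_MAX_STATIONS] = {
                { DEMO_STA_DRIVER_ACTIVE | DEMO_STA_UCODE_ACTIVE },
                { DEMO_STA_DRIVER_ACTIVE },
        };

        printf("cleared: %s\n",
               clear_ucode_stations(stations, DEMO_MAX_STATIONS) ? "yes" : "no");
        return 0;
}

Nothing in this pass touches the driver-active bits, which is what lets il_restore_stations re-add the same entries later.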
+ */ +void +il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx) +{ + int i; + unsigned long flags_spin; + bool cleared = false; + + D_INFO("Clearing ucode stations in driver\n"); + + spin_lock_irqsave(&il->sta_lock, flags_spin); + for (i = 0; i < il->hw_params.max_stations; i++) { + if (ctx && ctx->ctxid != il->stations[i].ctxid) + continue; + + if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { + D_INFO("Clearing ucode active for station %d\n", i); + il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; + cleared = true; + } + } + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + + if (!cleared) + D_INFO("No active stations found to be cleared\n"); +} +EXPORT_SYMBOL(il_clear_ucode_stations); + +/** + * il_restore_stations() - Restore driver known stations to device + * + * All stations considered active by driver, but not present in ucode, is + * restored. + * + * Function sleeps. + */ +void +il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx) +{ + struct il_addsta_cmd sta_cmd; + struct il_link_quality_cmd lq; + unsigned long flags_spin; + int i; + bool found = false; + int ret; + bool send_lq; + + if (!il_is_ready(il)) { + D_INFO("Not ready yet, not restoring any stations.\n"); + return; + } + + D_ASSOC("Restoring all known stations ... start.\n"); + spin_lock_irqsave(&il->sta_lock, flags_spin); + for (i = 0; i < il->hw_params.max_stations; i++) { + if (ctx->ctxid != il->stations[i].ctxid) + continue; + if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && + !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { + D_ASSOC("Restoring sta %pM\n", + il->stations[i].sta.sta.addr); + il->stations[i].sta.mode = 0; + il->stations[i].used |= IL_STA_UCODE_INPROGRESS; + found = true; + } + } + + for (i = 0; i < il->hw_params.max_stations; i++) { + if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { + memcpy(&sta_cmd, &il->stations[i].sta, + sizeof(struct il_addsta_cmd)); + send_lq = false; + if (il->stations[i].lq) { + memcpy(&lq, il->stations[i].lq, + sizeof(struct il_link_quality_cmd)); + send_lq = true; + } + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&il->sta_lock, flags_spin); + IL_ERR("Adding station %pM failed.\n", + il->stations[i].sta.sta.addr); + il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; + il->stations[i].used &= + ~IL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&il->sta_lock, + flags_spin); + } + /* + * Rate scaling has already been initialized, send + * current LQ command + */ + if (send_lq) + il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true); + spin_lock_irqsave(&il->sta_lock, flags_spin); + il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; + } + } + + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + if (!found) + D_INFO("Restoring all known stations" + " .... no stations to be restored.\n"); + else + D_INFO("Restoring all known stations" " .... 
complete.\n"); +} +EXPORT_SYMBOL(il_restore_stations); + +int +il_get_free_ucode_key_idx(struct il_priv *il) +{ + int i; + + for (i = 0; i < il->sta_key_max_num; i++) + if (!test_and_set_bit(i, &il->ucode_key_table)) + return i; + + return WEP_INVALID_OFFSET; +} +EXPORT_SYMBOL(il_get_free_ucode_key_idx); + +void +il_dealloc_bcast_stations(struct il_priv *il) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&il->sta_lock, flags); + for (i = 0; i < il->hw_params.max_stations; i++) { + if (!(il->stations[i].used & IL_STA_BCAST)) + continue; + + il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; + il->num_stations--; + BUG_ON(il->num_stations < 0); + kfree(il->stations[i].lq); + il->stations[i].lq = NULL; + } + spin_unlock_irqrestore(&il->sta_lock, flags); +} +EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); + +#ifdef CONFIG_IWLEGACY_DEBUG +static void +il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) +{ + int i; + D_RATE("lq station id 0x%x\n", lq->sta_id); + D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); +} +#else +static inline void +il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) +{ +} +#endif + +/** + * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity + * + * It sometimes happens when a HT rate has been in use and we + * loose connectivity with AP then mac80211 will first tell us that the + * current channel is not HT anymore before removing the station. In such a + * scenario the RXON flags will be updated to indicate we are not + * communicating HT anymore, but the LQ command may still contain HT rates. + * Test for this to prevent driver from sending LQ command between the time + * RXON flags are updated and when LQ command is updated. + */ +static bool +il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx, + struct il_link_quality_cmd *lq) +{ + int i; + + if (ctx->ht.enabled) + return true; + + D_INFO("Channel %u is not an HT channel\n", ctx->active.channel); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { + D_INFO("idx %d of LQ expects HT channel\n", i); + return false; + } + } + return true; +} + +/** + * il_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. 
+ */ +int +il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx, + struct il_link_quality_cmd *lq, u8 flags, bool init) +{ + int ret = 0; + unsigned long flags_spin; + + struct il_host_cmd cmd = { + .id = C_TX_LINK_QUALITY_CMD, + .len = sizeof(struct il_link_quality_cmd), + .flags = flags, + .data = lq, + }; + + if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) + return -EINVAL; + + spin_lock_irqsave(&il->sta_lock, flags_spin); + if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + return -EINVAL; + } + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + + il_dump_lq_cmd(il, lq); + BUG_ON(init && (cmd.flags & CMD_ASYNC)); + + if (il_is_lq_table_valid(il, ctx, lq)) + ret = il_send_cmd(il, &cmd); + else + ret = -EINVAL; + + if (cmd.flags & CMD_ASYNC) + return ret; + + if (init) { + D_INFO("init LQ command complete," + " clearing sta addition status for sta %d\n", + lq->sta_id); + spin_lock_irqsave(&il->sta_lock, flags_spin); + il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&il->sta_lock, flags_spin); + } + return ret; +} +EXPORT_SYMBOL(il_send_lq_cmd); + +int +il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct il_priv *il = hw->priv; + struct il_station_priv_common *sta_common = (void *)sta->drv_priv; + int ret; + + D_INFO("received request to remove station %pM\n", sta->addr); + mutex_lock(&il->mutex); + D_INFO("proceeding to remove station %pM\n", sta->addr); + ret = il_remove_station(il, sta_common->sta_id, sta->addr); + if (ret) + IL_ERR("Error removing station %pM\n", sta->addr); + mutex_unlock(&il->mutex); + return ret; +} +EXPORT_SYMBOL(il_mac_sta_remove); + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of idxes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two idx registers for managing the Rx buffers. + * + * The READ idx maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ idx is managed by the firmware once the card is enabled. + * + * The WRITE idx maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * IDX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ idx + * and fire the RX interrupt. The driver can then query the READ idx and + * process as many packets as possible, moving the WRITE idx forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. 
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ IDX is updated (updating the + * 'processed' and 'read' driver idxes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' idx is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * IDX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * il_rx_queue_alloc() Allocates rx_free + * il_rx_replenish() Replenishes rx_free list from rx_used, and calls + * il_rx_queue_restock + * il_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE idx. If insufficient rx_free buffers + * are available, schedules il_rx_replenish + * + * -- enable interrupts -- + * ISR - il_rx() Detach il_rx_bufs from pool up to the + * READ IDX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls il_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * il_rx_queue_space - Return number of free slots available in queue. + */ +int +il_rx_queue_space(const struct il_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(il_rx_queue_space); + +/** + * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue + */ +void +il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) +{ + unsigned long flags; + u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; + u32 reg; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + /* If power-saving is in use, make sure device is awake */ + if (test_bit(S_POWER_PMI, &il->status)) { + reg = _il_rd(il, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", + reg); + il_set_bit(il, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + q->write_actual = (q->write & ~0x7); + il_wr(il, rx_wrt_ptr_reg, q->write_actual); + + /* Else device is assumed to be awake */ + } else { + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + il_wr(il, rx_wrt_ptr_reg, q->write_actual); + } + + q->need_update = 0; + +exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(il_rx_queue_update_write_ptr); + +int +il_rx_queue_alloc(struct il_priv *il) +{ + struct il_rx_queue *rxq = &il->rxq; + struct device *dev = &il->pci_dev->dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = + dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, + GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + + rxq->rb_stts = + dma_alloc_coherent(dev, sizeof(struct il_rb_status), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb; + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + + /* Set us so that we have processed and used all buffers, but have + * not 
restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; + +err_rb: + dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); +err_bd: + return -ENOMEM; +} +EXPORT_SYMBOL(il_rx_queue_alloc); + +void +il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + D_11H("Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&il->measure_report, report, sizeof(*report)); + il->measurement_status |= MEASUREMENT_READY; +} +EXPORT_SYMBOL(il_hdl_spectrum_measurement); + +/* + * returns non-zero if packet should be dropped + */ +int +il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, + u32 decrypt_res, struct ieee80211_rx_status *stats) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + + /* + * All contexts have the same setting here due to it being + * a module parameter, so OK to check any context. + */ + if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) + return 0; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return 0; + + D_RX("decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + /* The uCode has got a bad phase 1 Key, pushes the packet. + * Decryption will be done in SW. */ + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_KEY_TTAK) + break; + + case RX_RES_STATUS_SEC_TYPE_WEP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) { + /* bad ICV, the packet is destroyed since the + * decryption is inplace, drop it */ + D_RX("Packet destroyed\n"); + return -1; + } + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + D_RX("hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } + return 0; +} +EXPORT_SYMBOL(il_set_decrypted_flag); + +/** + * il_txq_update_write_ptr - Send new write idx to hardware + */ +void +il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) +{ + u32 reg = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return; + + /* if we're trying to save power */ + if (test_bit(S_POWER_PMI, &il->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. */ + reg = _il_rd(il, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", + txq_id, reg); + il_set_bit(il, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return; + } + + il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); + + /* + * else not in power-save mode, + * uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
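The Rx theory-of-operation comment above describes the READ/WRITE index pair, and il_rx_queue_space (like il_queue_space for the Tx rings further down) reserves two slots so a completely full ring can never look identical to an empty one. A standalone sketch of that computation, with an illustrative RING_SIZE in place of RX_QUEUE_SIZE:

#include <stdio.h>

#define RING_SIZE 256	/* illustrative; the driver uses RX_QUEUE_SIZE */

/* Space between the two ring indexes, minus a two-slot guard band so
 * that the full and empty states cannot be confused (mirrors
 * il_rx_queue_space / il_queue_space above). */
static int ring_space(int read, int write)
{
        int s = read - write;

        if (s <= 0)
                s += RING_SIZE;
        s -= 2;			/* guard band */
        return s < 0 ? 0 : s;
}

int main(void)
{
        printf("indexes equal:      %d free\n", ring_space(0, 0));	/* 254 */
        printf("write one behind:   %d free\n", ring_space(10, 9));	/* 0 */
        return 0;
}

The two-slot reserve is the standard trick for telling a full circular buffer from an empty one when only the two indexes are stored, which is exactly what the driver's "keep some buffer" comment is about.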
+ */ + } else + _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); + txq->need_update = 0; +} +EXPORT_SYMBOL(il_txq_update_write_ptr); + +/** + * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's + */ +void +il_tx_queue_unmap(struct il_priv *il, int txq_id) +{ + struct il_tx_queue *txq = &il->txq[txq_id]; + struct il_queue *q = &txq->q; + + if (q->n_bd == 0) + return; + + while (q->write_ptr != q->read_ptr) { + il->cfg->ops->lib->txq_free_tfd(il, txq); + q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); + } +} +EXPORT_SYMBOL(il_tx_queue_unmap); + +/** + * il_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void +il_tx_queue_free(struct il_priv *il, int txq_id) +{ + struct il_tx_queue *txq = &il->txq[txq_id]; + struct device *dev = &il->pci_dev->dev; + int i; + + il_tx_queue_unmap(il, txq_id); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i < TFD_TX_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, + txq->tfds, txq->q.dma_addr); + + /* De-alloc array of per-TFD driver data */ + kfree(txq->txb); + txq->txb = NULL; + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(il_tx_queue_free); + +/** + * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue + */ +void +il_cmd_queue_unmap(struct il_priv *il) +{ + struct il_tx_queue *txq = &il->txq[il->cmd_queue]; + struct il_queue *q = &txq->q; + int i; + + if (q->n_bd == 0) + return; + + while (q->read_ptr != q->write_ptr) { + i = il_get_cmd_idx(q, q->read_ptr, 0); + + if (txq->meta[i].flags & CMD_MAPPED) { + pci_unmap_single(il->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + txq->meta[i].flags = 0; + } + + q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); + } + + i = q->n_win; + if (txq->meta[i].flags & CMD_MAPPED) { + pci_unmap_single(il->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + txq->meta[i].flags = 0; + } +} +EXPORT_SYMBOL(il_cmd_queue_unmap); + +/** + * il_cmd_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. 
+ */ +void +il_cmd_queue_free(struct il_priv *il) +{ + struct il_tx_queue *txq = &il->txq[il->cmd_queue]; + struct device *dev = &il->pci_dev->dev; + int i; + + il_cmd_queue_unmap(il); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i <= TFD_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, + txq->tfds, txq->q.dma_addr); + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(il_cmd_queue_free); + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer + * of buffer descriptors, each of which points to one or more data buffers for + * the device to read from or fill. Driver and device exchange status of each + * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty + * entries in each circular buffer, to protect against confusing empty and full + * queue states. + * + * The device reads or writes the data in the queues via the device's several + * DMA/FIFO channels. Each queue is mapped to a single DMA channel. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * See more detailed info in 4965.h. + ***************************************************/ + +int +il_queue_space(const struct il_queue *q) +{ + int s = q->read_ptr - q->write_ptr; + + if (q->read_ptr > q->write_ptr) + s -= q->n_bd; + + if (s <= 0) + s += q->n_win; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(il_queue_space); + + +/** + * il_queue_init - Initialize queue's high/low-water and read/write idxes + */ +static int +il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num, + u32 id) +{ + q->n_bd = count; + q->n_win = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise il_queue_inc_wrap + * and il_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * il_get_cmd_idx is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_win / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_win / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; + + return 0; +} + +/** + * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue + */ +static int +il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) +{ + struct device *dev = &il->pci_dev->dev; + size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; + + /* Driver ilate data, only for Tx (not command) queues, + * not shared with device. 
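il_queue_init above derives the Tx flow-control watermarks from the window size: the low mark is a quarter of the window (never less than 4) and the high mark an eighth (never less than 2), matching the stop/resume description in the DMA theory-of-operation comment. The arithmetic in isolation, with window sizes that are only illustrative:

#include <stdio.h>

/* Watermark rule from il_queue_init: stop feeding the queue when free
 * space falls below roughly a quarter of the window, resume once it is
 * back above roughly an eighth (per the theory-of-operation comment). */
static void queue_marks(int n_win, int *low, int *high)
{
        *low = n_win / 4;
        if (*low < 4)
                *low = 4;
        *high = n_win / 8;
        if (*high < 2)
                *high = 2;
}

int main(void)
{
        int low, high;

        queue_marks(64, &low, &high);
        printf("win=64 -> low=%d high=%d\n", low, high);	/* 16, 8 */
        queue_marks(8, &low, &high);
        printf("win=8  -> low=%d high=%d\n", low, high);	/* 4, 2 */
        return 0;
}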
*/ + if (id != il->cmd_queue) { + txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]), + GFP_KERNEL); + if (!txq->txb) { + IL_ERR("kmalloc for auxiliary BD " + "structures failed\n"); + goto error; + } + } else { + txq->txb = NULL; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = + dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); + if (!txq->tfds) { + IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz); + goto error; + } + txq->q.id = id; + + return 0; + +error: + kfree(txq->txb); + txq->txb = NULL; + + return -ENOMEM; +} + +/** + * il_tx_queue_init - Allocate and initialize one tx/cmd queue + */ +int +il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num, + u32 txq_id) +{ + int i, len; + int ret; + int actual_slots = slots_num; + + /* + * Alloc buffer array for commands (Tx or other types of commands). + * For the command queue (#4/#9), allocate command space + one big + * command for scan, since scan command is very huge; the system will + * not have two scans at the same time, so only one is needed. + * For normal Tx queues (all other queues), no super-size command + * space is needed. + */ + if (txq_id == il->cmd_queue) + actual_slots++; + + txq->meta = + kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL); + txq->cmd = + kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL); + + if (!txq->meta || !txq->cmd) + goto out_free_arrays; + + len = sizeof(struct il_device_cmd); + for (i = 0; i < actual_slots; i++) { + /* only happens for cmd queue */ + if (i == slots_num) + len = IL_MAX_CMD_SIZE; + + txq->cmd[i] = kmalloc(len, GFP_KERNEL); + if (!txq->cmd[i]) + goto err; + } + + /* Alloc driver data array and TFD circular buffer */ + ret = il_tx_queue_alloc(il, txq, txq_id); + if (ret) + goto err; + + txq->need_update = 0; + + /* + * For the default queues 0-3, set up the swq_id + * already -- all others need to get one later + * (if they need one at all). + */ + if (txq_id < 4) + il_set_swq_id(txq, txq_id, txq_id); + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * il_queue_inc_wrap and il_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail idxes */ + il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + il->cfg->ops->lib->txq_init(il, txq); + + return 0; +err: + for (i = 0; i < actual_slots; i++) + kfree(txq->cmd[i]); +out_free_arrays: + kfree(txq->meta); + kfree(txq->cmd); + + return -ENOMEM; +} +EXPORT_SYMBOL(il_tx_queue_init); + +void +il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num, + u32 txq_id) +{ + int actual_slots = slots_num; + + if (txq_id == il->cmd_queue) + actual_slots++; + + memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); + + txq->need_update = 0; + + /* Initialize queue's high/low-water marks, and head/tail idxes */ + il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + il->cfg->ops->lib->txq_init(il, txq); +} +EXPORT_SYMBOL(il_tx_queue_reset); + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/** + * il_enqueue_hcmd - enqueue a uCode command + * @il: device ilate data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. 
On success, it turns the idx (> 0) of command in the + * command queue. + */ +int +il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) +{ + struct il_tx_queue *txq = &il->txq[il->cmd_queue]; + struct il_queue *q = &txq->q; + struct il_device_cmd *out_cmd; + struct il_cmd_meta *out_meta; + dma_addr_t phys_addr; + unsigned long flags; + int len; + u32 idx; + u16 fix_size; + + cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); + fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries + * Also, check to see if command buffer should not exceed the size + * of device_cmd and max_cmd_size. */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->flags & CMD_SIZE_HUGE)); + BUG_ON(fix_size > IL_MAX_CMD_SIZE); + + if (il_is_rfkill(il) || il_is_ctkill(il)) { + IL_WARN("Not sending command - %s KILL\n", + il_is_rfkill(il) ? "RF" : "CT"); + return -EIO; + } + + spin_lock_irqsave(&il->hcmd_lock, flags); + + if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + spin_unlock_irqrestore(&il->hcmd_lock, flags); + + IL_ERR("Restarting adapter due to command queue full\n"); + queue_work(il->workqueue, &il->restart); + return -ENOSPC; + } + + idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + + if (WARN_ON(out_meta->flags & CMD_MAPPED)) { + spin_unlock_irqrestore(&il->hcmd_lock, flags); + return -ENOSPC; + } + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + out_meta->flags = cmd->flags | CMD_MAPPED; + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + if (cmd->flags & CMD_ASYNC) + out_meta->callback = cmd->callback; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); + if (cmd->flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; + len = sizeof(struct il_device_cmd); + if (idx == TFD_CMD_SLOTS) + len = IL_MAX_CMD_SIZE; + +#ifdef CONFIG_IWLEGACY_DEBUG + switch (out_cmd->hdr.cmd) { + case C_TX_LINK_QUALITY_CMD: + case C_SENSITIVITY: + D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, il->cmd_queue); + break; + default: + D_HC("Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, + idx, il->cmd_queue); + } +#endif + txq->need_update = 1; + + if (il->cfg->ops->lib->txq_update_byte_cnt_tbl) + /* Set up entry in queue's byte count circular buffer */ + il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0); + + phys_addr = + pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, + PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, phys_addr); + dma_unmap_len_set(out_meta, len, fix_size); + + il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, + 1, U32_PAD(cmd->len)); + + /* Increment and update queue's write idx */ + q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); + il_txq_update_write_ptr(il, txq); + + 
spin_unlock_irqrestore(&il->hcmd_lock, flags); + return idx; +} + +/** + * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' idx, all entries between old and new 'R' idx + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. + */ +static void +il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) +{ + struct il_tx_queue *txq = &il->txq[txq_id]; + struct il_queue *q = &txq->q; + int nfreed = 0; + + if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { + IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " + "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, + q->write_ptr, q->read_ptr); + return; + } + + for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + if (nfreed++ > 0) { + IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, + q->write_ptr, q->read_ptr); + queue_work(il->workqueue, &il->restart); + } + + } +} + +/** + * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +void +il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int idx = SEQ_TO_IDX(sequence); + int cmd_idx; + bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); + struct il_device_cmd *cmd; + struct il_cmd_meta *meta; + struct il_tx_queue *txq = &il->txq[il->cmd_queue]; + unsigned long flags; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (WARN + (txq_id != il->cmd_queue, + "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", + txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, + il->txq[il->cmd_queue].q.write_ptr)) { + il_print_hex_error(il, pkt, 32); + return; + } + + cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); + cmd = txq->cmd[cmd_idx]; + meta = &txq->meta[cmd_idx]; + + txq->time_stamp = jiffies; + + pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); + + /* Input error checking is done when commands are added to queue. */ + if (meta->flags & CMD_WANT_SKB) { + meta->source->reply_page = (unsigned long)rxb_addr(rxb); + rxb->page = NULL; + } else if (meta->callback) + meta->callback(il, cmd, pkt); + + spin_lock_irqsave(&il->hcmd_lock, flags); + + il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); + + if (!(meta->flags & CMD_ASYNC)) { + clear_bit(S_HCMD_ACTIVE, &il->status); + D_INFO("Clearing HCMD_ACTIVE for command %s\n", + il_get_cmd_string(cmd->hdr.cmd)); + wake_up(&il->wait_command_queue); + } + + /* Mark as unmapped */ + meta->flags = 0; + + spin_unlock_irqrestore(&il->hcmd_lock, flags); +} +EXPORT_SYMBOL(il_tx_cmd_complete); + +MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); +MODULE_VERSION(IWLWIFI_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). 
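il_enqueue_hcmd above stamps each host command with a 16-bit sequence value built from the command queue number and the write pointer, and il_tx_cmd_complete later recovers both via SEQ_TO_QUEUE/SEQ_TO_IDX to locate the slot to reclaim. A toy round-trip of that idea; the DEMO_* field layout below is a stand-in, since the real masks live in the command definitions rather than in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing only, not the driver's actual bit layout. */
#define DEMO_SEQ(queue, idx)	((uint16_t)((((queue) & 0x1f) << 8) | ((idx) & 0xff)))
#define DEMO_SEQ_TO_QUEUE(seq)	(((seq) >> 8) & 0x1f)
#define DEMO_SEQ_TO_IDX(seq)	((seq) & 0xff)

int main(void)
{
        uint16_t seq = DEMO_SEQ(4, 23);
        unsigned int q = DEMO_SEQ_TO_QUEUE(seq);
        unsigned int i = DEMO_SEQ_TO_IDX(seq);

        printf("seq=0x%04x queue=%u idx=%u\n", seq, q, i);
        /* prints: seq=0x0417 queue=4 idx=23 */
        return 0;
}

Carrying both values inside the response header is what lets the completion path detect a command that came back on the wrong queue, which is the WARN case handled in il_tx_cmd_complete.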
+ * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +static bool bt_coex_active = true; +module_param(bt_coex_active, bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); + +u32 il_debug_level; +EXPORT_SYMBOL(il_debug_level); + +const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +EXPORT_SYMBOL(il_bcast_addr); + +/* This function both allocates and initializes hw and il. */ +struct ieee80211_hw * +il_alloc_all(struct il_cfg *cfg) +{ + struct il_priv *il; + /* mac80211 allocates memory for this device instance, including + * space for this driver's ilate structure */ + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct il_priv), + cfg->ops->ieee80211_ops); + if (hw == NULL) { + pr_err("%s: Can not allocate network device\n", cfg->name); + goto out; + } + + il = hw->priv; + il->hw = hw; + +out: + return hw; +} +EXPORT_SYMBOL(il_alloc_all); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +static void +il_init_ht_hw_capab(const struct il_priv *il, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) +{ + u16 max_bit_rate = 0; + u8 rx_chains_num = il->hw_params.rx_chains_num; + u8 tx_chains_num = il->hw_params.tx_chains_num; + + ht_info->cap = 0; + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + + ht_info->ht_supported = true; + + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + max_bit_rate = MAX_BIT_RATE_20_MHZ; + if (il->hw_params.ht40_channel & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + ht_info->mcs.rx_mask[4] = 0x01; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + if (il->cfg->mod_params->amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains_num >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains_num >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains_num; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains_num != rx_chains_num) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= + ((tx_chains_num - + 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +/** + * il_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +int +il_init_geos(struct il_priv *il) +{ + struct il_channel_info *ch; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + s8 max_tx_power = 0; + + if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates || + il->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + D_INFO("Geography modes already initialized.\n"); + set_bit(S_GEO_CONFIGURED, &il->status); + return 0; + } + + channels = + kzalloc(sizeof(struct 
ieee80211_channel) * il->channel_count, + GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = + kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), + GFP_KERNEL); + if (!rates) { + kfree(channels); + return -ENOMEM; + } + + /* 5.2GHz channels start after the 2.4GHz channels */ + sband = &il->bands[IEEE80211_BAND_5GHZ]; + sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; + /* just OFDM */ + sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; + sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; + + if (il->cfg->sku & IL_SKU_N) + il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ); + + sband = &il->bands[IEEE80211_BAND_2GHZ]; + sband->channels = channels; + /* OFDM & CCK */ + sband->bitrates = rates; + sband->n_bitrates = RATE_COUNT_LEGACY; + + if (il->cfg->sku & IL_SKU_N) + il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ); + + il->ieee_channels = channels; + il->ieee_rates = rates; + + for (i = 0; i < il->channel_count; i++) { + ch = &il->channel_info[i]; + + if (!il_is_channel_valid(ch)) + continue; + + sband = &il->bands[ch->band]; + + geo_ch = &sband->channels[sband->n_channels++]; + + geo_ch->center_freq = + ieee80211_channel_to_frequency(ch->channel, ch->band); + geo_ch->max_power = ch->max_power_avg; + geo_ch->max_antenna_gain = 0xff; + geo_ch->hw_value = ch->channel; + + if (il_is_channel_valid(ch)) { + if (!(ch->flags & EEPROM_CHANNEL_IBSS)) + geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) + geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flags |= IEEE80211_CHAN_RADAR; + + geo_ch->flags |= ch->ht40_extension_channel; + + if (ch->max_power_avg > max_tx_power) + max_tx_power = ch->max_power_avg; + } else { + geo_ch->flags |= IEEE80211_CHAN_DISABLED; + } + + D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, + geo_ch->center_freq, + il_is_channel_a_band(ch) ? "5.2" : "2.4", + geo_ch-> + flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid", + geo_ch->flags); + } + + il->tx_power_device_lmt = max_tx_power; + il->tx_power_user_lmt = max_tx_power; + il->tx_power_next = max_tx_power; + + if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 && + (il->cfg->sku & IL_SKU_A)) { + IL_INFO("Incorrectly detected BG card as ABG. 
" + "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", + il->pci_dev->device, il->pci_dev->subsystem_device); + il->cfg->sku &= ~IL_SKU_A; + } + + IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", + il->bands[IEEE80211_BAND_2GHZ].n_channels, + il->bands[IEEE80211_BAND_5GHZ].n_channels); + + set_bit(S_GEO_CONFIGURED, &il->status); + + return 0; +} +EXPORT_SYMBOL(il_init_geos); + +/* + * il_free_geos - undo allocations in il_init_geos + */ +void +il_free_geos(struct il_priv *il) +{ + kfree(il->ieee_channels); + kfree(il->ieee_rates); + clear_bit(S_GEO_CONFIGURED, &il->status); +} +EXPORT_SYMBOL(il_free_geos); + +static bool +il_is_channel_extension(struct il_priv *il, enum ieee80211_band band, + u16 channel, u8 extension_chan_offset) +{ + const struct il_channel_info *ch_info; + + ch_info = il_get_channel_info(il, band, channel); + if (!il_is_channel_valid(ch_info)) + return false; + + if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + return !(ch_info-> + ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); + else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) + return !(ch_info-> + ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); + + return false; +} + +bool +il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap) +{ + if (!ctx->ht.enabled || !ctx->ht.is_40mhz) + return false; + + /* + * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * the bit will not set if it is pure 40MHz case + */ + if (ht_cap && !ht_cap->ht_supported) + return false; + +#ifdef CONFIG_IWLEGACY_DEBUGFS + if (il->disable_ht40) + return false; +#endif + + return il_is_channel_extension(il, il->band, + le16_to_cpu(ctx->staging.channel), + ctx->ht.extension_chan_offset); +} +EXPORT_SYMBOL(il_is_ht40_tx_allowed); + +static u16 +il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val; + u16 beacon_factor; + + /* + * If mac80211 hasn't given us a beacon interval, program + * the default into the device. + */ + if (!beacon_val) + return DEFAULT_BEACON_INTERVAL; + + /* + * If the beacon interval we obtained from the peer + * is too large, we'll have to wake up more often + * (and in IBSS case, we'll beacon too much) + * + * For example, if max_beacon_val is 4096, and the + * requested beacon interval is 7000, we'll have to + * use 3500 to be able to wake up on the beacons. + * + * This could badly influence beacon detection stats. + */ + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +int +il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx) +{ + u64 tsf; + s32 interval_tm, rem; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + struct ieee80211_vif *vif = ctx->vif; + + conf = &il->hw->conf; + + lockdep_assert_held(&il->mutex); + + memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd)); + + ctx->timing.timestamp = cpu_to_le64(il->timestamp); + ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); + + beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; + + /* + * TODO: For IBSS we need to get atim_win from mac80211, + * for now just always use 0 + */ + ctx->timing.atim_win = 0; + + beacon_int = + il_adjust_beacon_interval(beacon_int, + il->hw_params.max_beacon_itrvl * + TIME_UNIT); + ctx->timing.beacon_interval = cpu_to_le16(beacon_int); + + tsf = il->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * TIME_UNIT; + rem = do_div(tsf, interval_tm); + ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; + + D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(ctx->timing.beacon_interval), + le32_to_cpu(ctx->timing.beacon_init_val), + le16_to_cpu(ctx->timing.atim_win)); + + return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing), + &ctx->timing); +} +EXPORT_SYMBOL(il_send_rxon_timing); + +void +il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx, + int hw_decrypt) +{ + struct il_rxon_cmd *rxon = &ctx->staging; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} +EXPORT_SYMBOL(il_set_rxon_hwcrypto); + +/* validate RXON structure is valid */ +int +il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx) +{ + struct il_rxon_cmd *rxon = &ctx->staging; + bool error = false; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { + IL_WARN("check 2.4G: wrong narrow\n"); + error = true; + } + if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { + IL_WARN("check 2.4G: wrong radar\n"); + error = true; + } + } else { + if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { + IL_WARN("check 5.2G: not short slot!\n"); + error = true; + } + if (rxon->flags & RXON_FLG_CCK_MSK) { + IL_WARN("check 5.2G: CCK!\n"); + error = true; + } + } + if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { + IL_WARN("mac/bssid mcast!\n"); + error = true; + } + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && + (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { + IL_WARN("neither 1 nor 6 are basic\n"); + error = true; + } + + if (le16_to_cpu(rxon->assoc_id) > 2007) { + IL_WARN("aid > 2007\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == + (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { + IL_WARN("CCK and short slot\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == + (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { + IL_WARN("CCK and auto detect"); + error = true; + } + + if ((rxon-> + flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == + RXON_FLG_TGG_PROTECT_MSK) { + IL_WARN("TGg but no auto-detect\n"); + error = true; + } + + if (error) + IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); + + if (error) { + IL_ERR("Invalid RXON\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(il_check_rxon_cmd); + +/** + * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @il: staging_rxon is compared to active_rxon + * + * If the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 
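il_adjust_beacon_interval above copes with peers whose beacon interval exceeds what the device can program by waking only on every Nth beacon; the driver's own comment gives the example of a 4096 maximum with a requested 7000, yielding 3500. The same arithmetic as a standalone sketch (the value returned for a zero interval is a stand-in for DEFAULT_BEACON_INTERVAL):

#include <stdio.h>

/* Clamp rule from il_adjust_beacon_interval: divide the peer's interval
 * by the smallest factor that brings it under the device maximum. */
static unsigned int adjust_beacon_interval(unsigned int beacon_val,
                                           unsigned int max_beacon_val)
{
        unsigned int factor, new_val;

        if (!beacon_val)
                return 100;	/* stand-in for DEFAULT_BEACON_INTERVAL */

        factor = (beacon_val + max_beacon_val) / max_beacon_val;
        new_val = beacon_val / factor;
        if (!new_val)
                new_val = max_beacon_val;
        return new_val;
}

int main(void)
{
        printf("%u\n", adjust_beacon_interval(7000, 4096));	/* 3500 */
        printf("%u\n", adjust_beacon_interval(100, 4096));	/* 100 */
        return 0;
}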
+ */ +int +il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx) +{ + const struct il_rxon_cmd *staging = &ctx->staging; + const struct il_rxon_cmd *active = &ctx->active; + +#define CHK(cond) \ + if ((cond)) { \ + D_INFO("need full RXON - " #cond "\n"); \ + return 1; \ + } + +#define CHK_NEQ(c1, c2) \ + if ((c1) != (c2)) { \ + D_INFO("need full RXON - " \ + #c1 " != " #c2 " - %d != %d\n", \ + (c1), (c2)); \ + return 1; \ + } + + /* These items are only settable from the full RXON command */ + CHK(!il_is_associated_ctx(ctx)); + CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); + CHK(compare_ether_addr(staging->node_addr, active->node_addr)); + CHK(compare_ether_addr + (staging->wlap_bssid_addr, active->wlap_bssid_addr)); + CHK_NEQ(staging->dev_type, active->dev_type); + CHK_NEQ(staging->channel, active->channel); + CHK_NEQ(staging->air_propagation, active->air_propagation); + CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, + active->ofdm_ht_single_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, + active->ofdm_ht_dual_stream_basic_rates); + CHK_NEQ(staging->assoc_id, active->assoc_id); + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, + active->flags & RXON_FLG_BAND_24G_MSK); + + /* Check if we are switching association toggle */ + CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, + active->filter_flags & RXON_FILTER_ASSOC_MSK); + +#undef CHK +#undef CHK_NEQ + + return 0; +} +EXPORT_SYMBOL(il_full_rxon_required); + +u8 +il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx) +{ + /* + * Assign the lowest rate -- should really get this from + * the beacon skb from mac80211. 
+ */ + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) + return RATE_1M_PLCP; + else + return RATE_6M_PLCP; +} +EXPORT_SYMBOL(il_get_lowest_plcp); + +static void +_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf, + struct il_rxon_context *ctx) +{ + struct il_rxon_cmd *rxon = &ctx->staging; + + if (!ctx->ht.enabled) { + rxon->flags &= + ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK + | RXON_FLG_HT_PROT_MSK); + return; + } + + rxon->flags |= + cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= + ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (il_is_ht40_tx_allowed(il, ctx, NULL)) { + /* pure ht40 */ + if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* channel location only valid if in Mixed mode */ + IL_ERR("invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, ctx); + + D_ASSOC("rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), + ctx->ht.protection, ctx->ht.extension_chan_offset); +} + +void +il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) +{ + _il_set_rxon_ht(il, ht_conf, &il->ctx); +} +EXPORT_SYMBOL(il_set_rxon_ht); + +/* Return valid, unused, channel for a passive scan to reset the RF */ +u8 +il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band) +{ + const struct il_channel_info *ch_info; + int i; + u8 channel = 0; + u8 min, max; + + if (band == IEEE80211_BAND_5GHZ) { + min = 14; + max = il->channel_count; + } else { + min = 0; + max = 14; + } + + for (i = min; i < max; i++) { + channel = il->channel_info[i].channel; + if (channel == le16_to_cpu(il->ctx.staging.channel)) + continue; + + ch_info = il_get_channel_info(il, band, channel); + if (il_is_channel_valid(ch_info)) + break; + } + + return channel; +} +EXPORT_SYMBOL(il_get_single_channel_number); + +/** + * il_set_rxon_channel - Set the band and channel values in staging RXON + * @ch: requested channel as a pointer to struct ieee80211_channel + + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the ch->band + */ +int +il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch, + struct il_rxon_context *ctx) +{ + enum ieee80211_band band = ch->band; + u16 channel = ch->hw_value; + + if (le16_to_cpu(ctx->staging.channel) == channel && 
il->band == band) + return 0; + + ctx->staging.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; + else + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + + il->band = band; + + D_INFO("Staging channel set to %d [%d]\n", channel, band); + + return 0; +} +EXPORT_SYMBOL(il_set_rxon_channel); + +void +il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx, + enum ieee80211_band band, struct ieee80211_vif *vif) +{ + if (band == IEEE80211_BAND_5GHZ) { + ctx->staging.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_CCK_MSK); + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from il_post_associate() */ + if (vif && vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; + ctx->staging.flags &= ~RXON_FLG_CCK_MSK; + } +} +EXPORT_SYMBOL(il_set_flags_for_band); + +/* + * initialize rxon structure with default values from eeprom + */ +void +il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx) +{ + const struct il_channel_info *ch_info; + + memset(&ctx->staging, 0, sizeof(ctx->staging)); + + if (!ctx->vif) { + ctx->staging.dev_type = ctx->unused_devtype; + } else + switch (ctx->vif->type) { + + case NL80211_IFTYPE_STATION: + ctx->staging.dev_type = ctx->station_devtype; + ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + ctx->staging.dev_type = ctx->ibss_devtype; + ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + ctx->staging.filter_flags = + RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + default: + IL_ERR("Unsupported interface type %d\n", + ctx->vif->type); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(il->hw)->short_preamble) + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = + il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel)); + + if (!ch_info) + ch_info = &il->channel_info[0]; + + ctx->staging.channel = cpu_to_le16(ch_info->channel); + il->band = ch_info->band; + + il_set_flags_for_band(il, ctx, il->band, ctx->vif); + + ctx->staging.ofdm_basic_rates = + (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; + ctx->staging.cck_basic_rates = + (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; + + /* clear both MIX and PURE40 mode flag */ + ctx->staging.flags &= + ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); + if (ctx->vif) + memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); + + ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; +} +EXPORT_SYMBOL(il_connection_init_rx_config); + +void +il_set_rate(struct il_priv *il) +{ + const struct ieee80211_supported_band *hw = NULL; + struct ieee80211_rate *rate; + int i; + + hw = il_get_hw_mode(il, il->band); + if (!hw) { + IL_ERR("Failed to set rate: unable to get hw mode\n"); + return; + } + + il->active_rate = 0; + + for (i = 0; i < hw->n_bitrates; i++) { + rate = &(hw->bitrates[i]); + if (rate->hw_value < RATE_COUNT_LEGACY) + il->active_rate |= (1 << rate->hw_value); + } + + D_RATE("Set active_rate = %0x\n", il->active_rate); + + il->ctx.staging.cck_basic_rates = + (IL_CCK_BASIC_RATES_MASK >> 
IL_FIRST_CCK_RATE) & 0xF; + + il->ctx.staging.ofdm_basic_rates = + (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; +} +EXPORT_SYMBOL(il_set_rate); + +void +il_chswitch_done(struct il_priv *il, bool is_success) +{ + struct il_rxon_context *ctx = &il->ctx; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) + ieee80211_chswitch_done(ctx->vif, is_success); +} +EXPORT_SYMBOL(il_chswitch_done); + +void +il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_csa_notification *csa = &(pkt->u.csa_notif); + + struct il_rxon_context *ctx = &il->ctx; + struct il_rxon_cmd *rxon = (void *)&ctx->active; + + if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) + return; + + if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { + rxon->channel = csa->channel; + ctx->staging.channel = csa->channel; + D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel)); + il_chswitch_done(il, true); + } else { + IL_ERR("CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + il_chswitch_done(il, false); + } +} +EXPORT_SYMBOL(il_hdl_csa); + +#ifdef CONFIG_IWLEGACY_DEBUG +void +il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx) +{ + struct il_rxon_cmd *rxon = &ctx->staging; + + D_RADIO("RX CONFIG:\n"); + il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); + D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); + D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); + D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); + D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); + D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr); + D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); +} +EXPORT_SYMBOL(il_print_rx_config_cmd); +#endif +/** + * il_irq_handle_error - called for HW or SW error interrupt from card + */ +void +il_irq_handle_error(struct il_priv *il) +{ + /* Set the FW error flag -- cleared on il_down */ + set_bit(S_FW_ERROR, &il->status); + + /* Cancel currently queued command. 
*/ + clear_bit(S_HCMD_ACTIVE, &il->status); + + IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version); + + il->cfg->ops->lib->dump_nic_error_log(il); + if (il->cfg->ops->lib->dump_fh) + il->cfg->ops->lib->dump_fh(il, NULL, false); +#ifdef CONFIG_IWLEGACY_DEBUG + if (il_get_debug_level(il) & IL_DL_FW_ERRORS) + il_print_rx_config_cmd(il, &il->ctx); +#endif + + wake_up(&il->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(S_READY, &il->status); + + if (!test_bit(S_EXIT_PENDING, &il->status)) { + IL_DBG(IL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (il->cfg->mod_params->restart_fw) + queue_work(il->workqueue, &il->restart); + } +} +EXPORT_SYMBOL(il_irq_handle_error); + +static int +il_apm_stop_master(struct il_priv *il) +{ + int ret = 0; + + /* stop device's busmaster DMA activity */ + il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = + _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (ret) + IL_WARN("Master Disable Timed Out, 100 usec\n"); + + D_INFO("stop master\n"); + + return ret; +} + +void +il_apm_stop(struct il_priv *il) +{ + D_INFO("Stop card, put in low power state\n"); + + /* Stop device's DMA activity */ + il_apm_stop_master(il); + + /* Reset the entire device */ + il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} +EXPORT_SYMBOL(il_apm_stop); + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via il_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +int +il_apm_init(struct il_priv *il) +{ + int ret = 0; + u16 lctl; + + D_INFO("Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* Disable L0S exit timer (platform NMI Work/Around) */ + il_set_bit(il, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + il_set_bit(il, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + * NOTE: This is no-op for 3945 (non-existent bit) + */ + il_set_bit(il, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + /* + * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. + * Check if BIOS (or OS) enabled L1-ASPM on this device. + * If so (likely), disable L0S, so device moves directly L0->L1; + * costs negligible amount of power savings. + * If not (unlikely), enable L0S, so there is at least some + * power savings, even without L1. + */ + if (il->cfg->base_params->set_l0s) { + lctl = il_pcie_link_ctl(il); + if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == + PCI_CFG_LINK_CTRL_VAL_L1_EN) { + /* L1-ASPM enabled; disable(!) L0S */ + il_set_bit(il, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + D_POWER("L1 Enabled; Disabling L0S\n"); + } else { + /* L1-ASPM disabled; enable(!) 
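[Editor's note] Both il_apm_stop_master() and the clock-stabilization wait in il_apm_init() lean on one primitive: set a request bit, then poll a status register until a mask matches or a timeout expires. A user-space sketch of that loop, with a simulated register read standing in for _il_rd()/_il_poll_bit(); the real helper inserts a small delay between reads and takes its timeout in microseconds, here one iteration simply stands for one delay slot.

#include <stdint.h>
#include <stdio.h>

/* Simulated register: pretend the "master disabled" ack appears after a delay. */
static int poll_count;
static uint32_t read_reg(void)
{
    return (++poll_count >= 5) ? 0x100 : 0x0;   /* bit 8 = ack */
}

/* Poll until (reg & mask) == (bits & mask), or give up after 'timeout' polls. */
static int poll_bit(uint32_t bits, uint32_t mask, int timeout)
{
    for (int t = 0; t < timeout; t++) {
        if ((read_reg() & mask) == (bits & mask))
            return t;           /* elapsed delay slots */
    }
    return -1;                  /* timed out, caller prints a warning */
}

int main(void)
{
    int ret = poll_bit(0x100, 0x100, 100);
    if (ret < 0)
        printf("Master Disable Timed Out\n");
    else
        printf("acknowledged after %d polls\n", ret);
    return 0;
}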
L0S */ + il_clear_bit(il, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + D_POWER("L1 Disabled; Enabling L0S\n"); + } + } + + /* Configure analog phase-lock-loop before activating to D0A */ + if (il->cfg->base_params->pll_cfg_val) + il_set_bit(il, CSR_ANA_PLL_CFG, + il->cfg->base_params->pll_cfg_val); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. il_wr_prph() + * and accesses to uCode SRAM. + */ + ret = + _il_poll_bit(il, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (ret < 0) { + D_INFO("Failed to init the card\n"); + goto out; + } + + /* + * Enable DMA and BSM (if used) clocks, wait for them to stabilize. + * BSM (Boostrap State Machine) is only in 3945 and 4965. + * + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits + * do not disable clocks. This preserves any hardware bits already + * set by default in "CLK_CTRL_REG" after reset. + */ + if (il->cfg->base_params->use_bsm) + il_wr_prph(il, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); + else + il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + il_set_bits_prph(il, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + +out: + return ret; +} +EXPORT_SYMBOL(il_apm_init); + +int +il_set_tx_power(struct il_priv *il, s8 tx_power, bool force) +{ + int ret; + s8 prev_tx_power; + bool defer; + struct il_rxon_context *ctx = &il->ctx; + + lockdep_assert_held(&il->mutex); + + if (il->tx_power_user_lmt == tx_power && !force) + return 0; + + if (!il->cfg->ops->lib->send_tx_power) + return -EOPNOTSUPP; + + /* 0 dBm mean 1 milliwatt */ + if (tx_power < 0) { + IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power); + return -EINVAL; + } + + if (tx_power > il->tx_power_device_lmt) { + IL_WARN("Requested user TXPOWER %d above upper limit %d.\n", + tx_power, il->tx_power_device_lmt); + return -EINVAL; + } + + if (!il_is_ready_rf(il)) + return -EIO; + + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ + il->tx_power_next = tx_power; + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(S_SCANNING, &il->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + D_INFO("Deferring tx power set\n"); + return 0; + } + + prev_tx_power = il->tx_power_user_lmt; + il->tx_power_user_lmt = tx_power; + + ret = il->cfg->ops->lib->send_tx_power(il); + + /* if fail to set tx_power, restore the orig. tx power */ + if (ret) { + il->tx_power_user_lmt = prev_tx_power; + il->tx_power_next = prev_tx_power; + } + return ret; +} +EXPORT_SYMBOL(il_set_tx_power); + +void +il_send_bt_config(struct il_priv *il) +{ + struct il_bt_cmd bt_cmd = { + .lead_time = BT_LEAD_TIME_DEF, + .max_kill = BT_MAX_KILL_DEF, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + if (!bt_coex_active) + bt_cmd.flags = BT_COEX_DISABLE; + else + bt_cmd.flags = BT_COEX_ENABLE; + + D_INFO("BT coex %s\n", + (bt_cmd.flags == BT_COEX_DISABLE) ? 
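[Editor's note] il_set_tx_power() is mostly input validation plus a deferral rule: reject negative or above-device-limit requests, always remember the newest request in tx_power_next, and postpone sending the command while a scan is running or the staging RXON differs from the active one. A distilled version of those checks; the limits and field names here are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct txpower_state {
    int user_limit;     /* currently applied user limit (dBm)            */
    int next;           /* value a later commit/scan-complete will apply */
    int device_limit;   /* upper bound from EEPROM/hardware              */
    bool scanning;
    bool rxon_dirty;    /* staging RXON differs from active RXON         */
};

/* Returns 0 on success or deferral, -1 on an invalid request. */
static int set_tx_power(struct txpower_state *s, int dbm, bool force)
{
    if (dbm < 0 || dbm > s->device_limit)
        return -1;                          /* -EINVAL in the driver */

    s->next = dbm;                          /* always track the latest request */

    if ((s->scanning || s->rxon_dirty) && !force)
        return 0;                           /* deferred, applied later */

    s->user_limit = dbm;                    /* would send the TX power command */
    return 0;
}

int main(void)
{
    struct txpower_state s = { .user_limit = 14, .next = 14,
                               .device_limit = 16, .scanning = true };
    set_tx_power(&s, 15, false);
    printf("deferred: limit=%d next=%d\n", s.user_limit, s.next);
    return 0;
}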
"disable" : "active"); + + if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd)) + IL_ERR("failed to send BT Coex Config\n"); +} +EXPORT_SYMBOL(il_send_bt_config); + +int +il_send_stats_request(struct il_priv *il, u8 flags, bool clear) +{ + struct il_stats_cmd stats_cmd = { + .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0, + }; + + if (flags & CMD_ASYNC) + return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd), + &stats_cmd, NULL); + else + return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd), + &stats_cmd); +} +EXPORT_SYMBOL(il_send_stats_request); + +void +il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb) +{ +#ifdef CONFIG_IWLEGACY_DEBUG + struct il_rx_pkt *pkt = rxb_addr(rxb); + struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); + D_RX("sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} +EXPORT_SYMBOL(il_hdl_pm_sleep); + +void +il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; + D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len, + il_get_cmd_string(pkt->hdr.cmd)); + il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); +} +EXPORT_SYMBOL(il_hdl_pm_debug_stats); + +void +il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb) +{ + struct il_rx_pkt *pkt = rxb_addr(rxb); + + IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + il_get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} +EXPORT_SYMBOL(il_hdl_error); + +void +il_clear_isr_stats(struct il_priv *il) +{ + memset(&il->isr_stats, 0, sizeof(il->isr_stats)); +} + +int +il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct il_priv *il = hw->priv; + unsigned long flags; + int q; + + D_MAC80211("enter\n"); + + if (!il_is_ready_rf(il)) { + D_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + D_MAC80211("leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&il->lock, flags); + + il->ctx.qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + il->ctx.qos_data.def_qos_parm.ac[q].cw_max = + cpu_to_le16(params->cw_max); + il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + il->ctx.qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0; + + spin_unlock_irqrestore(&il->lock, flags); + + D_MAC80211("leave\n"); + return 0; +} +EXPORT_SYMBOL(il_mac_conf_tx); + +int +il_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct il_priv *il = hw->priv; + + return il->ibss_manager == IL_IBSS_MANAGER; +} +EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon); + +static int +il_set_mode(struct il_priv *il, struct il_rxon_context *ctx) +{ + il_connection_init_rx_config(il, ctx); + + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, ctx); + + return il_commit_rxon(il, ctx); +} + +static int +il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err; + + lockdep_assert_held(&il->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it 
is for hardware + * that supports only one context. + */ + il->iw_mode = vif->type; + + ctx->is_active = true; + + err = il_set_mode(il, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + return 0; +} + +int +il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct il_priv *il = hw->priv; + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + int err; + u32 modes; + + D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); + + mutex_lock(&il->mutex); + + if (!il_is_ready_rf(il)) { + IL_WARN("Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + /* check if busy context is exclusive */ + if (il->ctx.vif && + (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) { + err = -EINVAL; + goto out; + } + + modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes; + if (!(modes & BIT(vif->type))) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = &il->ctx; + il->ctx.vif = vif; + + err = il_setup_interface(il, &il->ctx); + if (err) { + il->ctx.vif = NULL; + il->iw_mode = NL80211_IFTYPE_STATION; + } + +out: + mutex_unlock(&il->mutex); + + D_MAC80211("leave\n"); + return err; +} +EXPORT_SYMBOL(il_mac_add_interface); + +static void +il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, + bool mode_change) +{ + struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&il->mutex); + + if (il->scan_vif == vif) { + il_scan_cancel_timeout(il, 200); + il_force_scan_end(il); + } + + if (!mode_change) { + il_set_mode(il, ctx); + if (!ctx->always_active) + ctx->is_active = false; + } +} + +void +il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct il_priv *il = hw->priv; + struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); + + D_MAC80211("enter\n"); + + mutex_lock(&il->mutex); + + WARN_ON(ctx->vif != vif); + ctx->vif = NULL; + + il_teardown_interface(il, vif, false); + + memset(il->bssid, 0, ETH_ALEN); + mutex_unlock(&il->mutex); + + D_MAC80211("leave\n"); + +} +EXPORT_SYMBOL(il_mac_remove_interface); + +int +il_alloc_txq_mem(struct il_priv *il) +{ + if (!il->txq) + il->txq = + kzalloc(sizeof(struct il_tx_queue) * + il->cfg->base_params->num_of_queues, GFP_KERNEL); + if (!il->txq) { + IL_ERR("Not enough memory for txq\n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(il_alloc_txq_mem); + +void +il_txq_mem(struct il_priv *il) +{ + kfree(il->txq); + il->txq = NULL; +} +EXPORT_SYMBOL(il_txq_mem); + +#ifdef CONFIG_IWLEGACY_DEBUGFS + +#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES) + +void +il_reset_traffic_log(struct il_priv *il) +{ + il->tx_traffic_idx = 0; + il->rx_traffic_idx = 0; + if (il->tx_traffic) + memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE); + if (il->rx_traffic) + memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE); +} + +int +il_alloc_traffic_mem(struct il_priv *il) +{ + u32 traffic_size = IL_TRAFFIC_DUMP_SIZE; + + if (il_debug_level & IL_DL_TX) { + if (!il->tx_traffic) { + il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL); + if (!il->tx_traffic) + return -ENOMEM; + } + } + if (il_debug_level & IL_DL_RX) { + if (!il->rx_traffic) { + il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL); + if (!il->rx_traffic) + return -ENOMEM; + } + } + il_reset_traffic_log(il); + return 0; +} +EXPORT_SYMBOL(il_alloc_traffic_mem); + +void +il_free_traffic_mem(struct il_priv *il) +{ + kfree(il->tx_traffic); + il->tx_traffic = NULL; + + kfree(il->rx_traffic); + il->rx_traffic 
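[Editor's note] Two small conversions in il_mac_conf_tx() (earlier in this hunk) are easy to miss: mac80211's AC index is reversed into the device's EDCA slot (q = AC_NUM - 1 - queue), and the TXOP arrives in 32-microsecond units and is multiplied by 32 before being stored. A tiny sketch of both, assuming AC_NUM is 4 and leaving out the cpu_to_le16() conversions the driver performs.

#include <stdint.h>
#include <stdio.h>

#define AC_NUM 4    /* VO, VI, BE, BK in mac80211 ordering */

struct edca_params {
    uint16_t cw_min, cw_max;
    uint8_t  aifsn;
    uint16_t txop_usec;
};

static void fill_edca(struct edca_params ac[AC_NUM], int mac80211_queue,
                      uint16_t cw_min, uint16_t cw_max, uint8_t aifs,
                      uint16_t txop_32usec)
{
    int q = AC_NUM - 1 - mac80211_queue;    /* device slot order is reversed */

    ac[q].cw_min    = cw_min;
    ac[q].cw_max    = cw_max;
    ac[q].aifsn     = aifs;
    ac[q].txop_usec = txop_32usec * 32;     /* mac80211 passes 32 usec units */
}

int main(void)
{
    struct edca_params ac[AC_NUM] = { 0 };

    fill_edca(ac, 0, 3, 7, 2, 47);          /* mac80211 queue 0 -> device slot 3 */
    printf("slot 3: txop=%u usec\n", ac[3].txop_usec);
    return 0;
}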
= NULL; +} +EXPORT_SYMBOL(il_free_traffic_mem); + +void +il_dbg_log_tx_data_frame(struct il_priv *il, u16 length, + struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(il_debug_level & IL_DL_TX))) + return; + + if (!il->tx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = + (length > + IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length; + memcpy((il->tx_traffic + + (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header, + len); + il->tx_traffic_idx = + (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(il_dbg_log_tx_data_frame); + +void +il_dbg_log_rx_data_frame(struct il_priv *il, u16 length, + struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(il_debug_level & IL_DL_RX))) + return; + + if (!il->rx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = + (length > + IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length; + memcpy((il->rx_traffic + + (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header, + len); + il->rx_traffic_idx = + (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(il_dbg_log_rx_data_frame); + +const char * +il_get_mgmt_string(int cmd) +{ + switch (cmd) { + IL_CMD(MANAGEMENT_ASSOC_REQ); + IL_CMD(MANAGEMENT_ASSOC_RESP); + IL_CMD(MANAGEMENT_REASSOC_REQ); + IL_CMD(MANAGEMENT_REASSOC_RESP); + IL_CMD(MANAGEMENT_PROBE_REQ); + IL_CMD(MANAGEMENT_PROBE_RESP); + IL_CMD(MANAGEMENT_BEACON); + IL_CMD(MANAGEMENT_ATIM); + IL_CMD(MANAGEMENT_DISASSOC); + IL_CMD(MANAGEMENT_AUTH); + IL_CMD(MANAGEMENT_DEAUTH); + IL_CMD(MANAGEMENT_ACTION); + default: + return "UNKNOWN"; + + } +} + +const char * +il_get_ctrl_string(int cmd) +{ + switch (cmd) { + IL_CMD(CONTROL_BACK_REQ); + IL_CMD(CONTROL_BACK); + IL_CMD(CONTROL_PSPOLL); + IL_CMD(CONTROL_RTS); + IL_CMD(CONTROL_CTS); + IL_CMD(CONTROL_ACK); + IL_CMD(CONTROL_CFEND); + IL_CMD(CONTROL_CFENDACK); + default: + return "UNKNOWN"; + + } +} + +void +il_clear_traffic_stats(struct il_priv *il) +{ + memset(&il->tx_stats, 0, sizeof(struct traffic_stats)); + memset(&il->rx_stats, 0, sizeof(struct traffic_stats)); +} + +/* + * if CONFIG_IWLEGACY_DEBUGFS defined, + * il_update_stats function will + * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass + * Use debugFs to display the rx/rx_stats + * if CONFIG_IWLEGACY_DEBUGFS not being defined, then no MGMT and CTRL + * information will be recorded, but DATA pkt still will be recorded + * for the reason of il_led.c need to control the led blinking based on + * number of tx and rx data. 
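[Editor's note] The debugfs traffic log filled by il_dbg_log_tx_data_frame()/il_dbg_log_rx_data_frame() is a fixed-size ring: each data frame's header is copied into a slot of IL_TRAFFIC_ENTRY_SIZE bytes (truncated if longer) and the write index wraps modulo IL_TRAFFIC_ENTRIES. A stand-alone sketch with small, made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 16            /* stand-in for IL_TRAFFIC_ENTRY_SIZE */
#define ENTRIES     4            /* stand-in for IL_TRAFFIC_ENTRIES    */

static uint8_t  log_buf[ENTRY_SIZE * ENTRIES];
static unsigned log_idx;

static void log_frame(const uint8_t *hdr, size_t len)
{
    size_t copy = len > ENTRY_SIZE ? ENTRY_SIZE : len;   /* truncate long headers */

    memcpy(log_buf + log_idx * ENTRY_SIZE, hdr, copy);
    log_idx = (log_idx + 1) % ENTRIES;                   /* wrap around */
}

int main(void)
{
    uint8_t hdr[24] = { 0x08, 0x00 };    /* a data-frame header, mostly zeroed */

    for (int i = 0; i < 6; i++)          /* 6 frames overwrite the oldest two */
        log_frame(hdr, sizeof(hdr));

    printf("next slot: %u\n", log_idx);
    return 0;
}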
+ * + */ +void +il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len) +{ + struct traffic_stats *stats; + + if (is_tx) + stats = &il->tx_stats; + else + stats = &il->rx_stats; + + if (ieee80211_is_mgmt(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + stats->mgmt[MANAGEMENT_ASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): + stats->mgmt[MANAGEMENT_ASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + stats->mgmt[MANAGEMENT_REASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): + stats->mgmt[MANAGEMENT_REASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + stats->mgmt[MANAGEMENT_PROBE_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + stats->mgmt[MANAGEMENT_PROBE_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BEACON): + stats->mgmt[MANAGEMENT_BEACON]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ATIM): + stats->mgmt[MANAGEMENT_ATIM]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + stats->mgmt[MANAGEMENT_DISASSOC]++; + break; + case cpu_to_le16(IEEE80211_STYPE_AUTH): + stats->mgmt[MANAGEMENT_AUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + stats->mgmt[MANAGEMENT_DEAUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACTION): + stats->mgmt[MANAGEMENT_ACTION]++; + break; + } + } else if (ieee80211_is_ctl(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): + stats->ctrl[CONTROL_BACK_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BACK): + stats->ctrl[CONTROL_BACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PSPOLL): + stats->ctrl[CONTROL_PSPOLL]++; + break; + case cpu_to_le16(IEEE80211_STYPE_RTS): + stats->ctrl[CONTROL_RTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CTS): + stats->ctrl[CONTROL_CTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACK): + stats->ctrl[CONTROL_ACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFEND): + stats->ctrl[CONTROL_CFEND]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFENDACK): + stats->ctrl[CONTROL_CFENDACK]++; + break; + } + } else { + /* data */ + stats->data_cnt++; + stats->data_bytes += len; + } +} +EXPORT_SYMBOL(il_update_stats); +#endif + +int +il_force_reset(struct il_priv *il, bool external) +{ + struct il_force_reset *force_reset; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return -EINVAL; + + force_reset = &il->force_reset; + force_reset->reset_request_count++; + if (!external) { + if (force_reset->last_force_reset_jiffies && + time_after(force_reset->last_force_reset_jiffies + + force_reset->reset_duration, jiffies)) { + D_INFO("force reset rejected\n"); + force_reset->reset_reject_count++; + return -EAGAIN; + } + } + force_reset->reset_success_count++; + force_reset->last_force_reset_jiffies = jiffies; + + /* + * if the request is from external(ex: debugfs), + * then always perform the request in regardless the module + * parameter setting + * if the request is from internal (uCode error or driver + * detect failure), then fw_restart module parameter + * need to be check before performing firmware reload + */ + + if (!external && !il->cfg->mod_params->restart_fw) { + D_INFO("Cancel firmware reload based on " + "module parameter setting\n"); + return 0; + } + + IL_ERR("On demand firmware reload\n"); + + /* Set the FW error flag -- cleared on il_down */ + set_bit(S_FW_ERROR, &il->status); + wake_up(&il->wait_command_queue); + /* + * Keep the restart process from trying to send 
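[Editor's note] il_force_reset() rate-limits internally triggered resets: a new internal request is rejected while last_force_reset_jiffies + reset_duration is still in the future, whereas external (debugfs) requests bypass both this window and the fw_restart module parameter check. A sketch of that debounce using a plain monotonic counter in place of jiffies/time_after():

#include <stdbool.h>
#include <stdio.h>

struct force_reset {
    unsigned long last;        /* time of the last accepted reset   */
    unsigned long duration;    /* minimum spacing between resets    */
    unsigned request_count, reject_count, success_count;
};

/* 'now' stands in for jiffies; returns true if the reset may proceed. */
static bool force_reset_allowed(struct force_reset *fr, unsigned long now,
                                bool external)
{
    fr->request_count++;
    if (!external && fr->last && now < fr->last + fr->duration) {
        fr->reject_count++;
        return false;                       /* -EAGAIN in the driver */
    }
    fr->success_count++;
    fr->last = now;
    return true;
}

int main(void)
{
    struct force_reset fr = { .duration = 200 };

    printf("%d\n", force_reset_allowed(&fr, 100, false));  /* 1: first request   */
    printf("%d\n", force_reset_allowed(&fr, 150, false));  /* 0: inside window   */
    printf("%d\n", force_reset_allowed(&fr, 150, true));   /* 1: external bypass */
    return 0;
}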
host + * commands by clearing the INIT status bit + */ + clear_bit(S_READY, &il->status); + queue_work(il->workqueue, &il->restart); + + return 0; +} + +int +il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct il_priv *il = hw->priv; + struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); + u32 modes; + int err; + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&il->mutex); + + if (!ctx->vif || !il_is_ready_rf(il)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! + */ + err = -EBUSY; + goto out; + } + + modes = ctx->interface_modes | ctx->exclusive_interface_modes; + if (!(modes & BIT(newtype))) { + err = -EOPNOTSUPP; + goto out; + } + + if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) || + (il->ctx.exclusive_interface_modes & BIT(newtype))) { + err = -EINVAL; + goto out; + } + + /* success */ + il_teardown_interface(il, vif, true); + vif->type = newtype; + vif->p2p = newp2p; + err = il_setup_interface(il, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. + */ + err = 0; + +out: + mutex_unlock(&il->mutex); + return err; +} +EXPORT_SYMBOL(il_mac_change_interface); + +/* + * On every watchdog tick we check (latest) time stamp. If it does not + * change during timeout period and queue is not empty we reset firmware. + */ +static int +il_check_stuck_queue(struct il_priv *il, int cnt) +{ + struct il_tx_queue *txq = &il->txq[cnt]; + struct il_queue *q = &txq->q; + unsigned long timeout; + int ret; + + if (q->read_ptr == q->write_ptr) { + txq->time_stamp = jiffies; + return 0; + } + + timeout = + txq->time_stamp + + msecs_to_jiffies(il->cfg->base_params->wd_timeout); + + if (time_after(jiffies, timeout)) { + IL_ERR("Queue %d stuck for %u ms.\n", q->id, + il->cfg->base_params->wd_timeout); + ret = il_force_reset(il, false); + return (ret == -EAGAIN) ? 0 : 1; + } + + return 0; +} + +/* + * Making watchdog tick be a quarter of timeout assure we will + * discover the queue hung between timeout and 1.25*timeout + */ +#define IL_WD_TICK(timeout) ((timeout) / 4) + +/* + * Watchdog timer callback, we check each tx queue for stuck, if if hung + * we reset the firmware. If everything is fine just rearm the timer. 
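[Editor's note] The watchdog's detection window follows from IL_WD_TICK(timeout) = timeout/4: a queue whose time stamp stops advancing is noticed on the first tick that lands at or past time_stamp + timeout, so detection happens between 1.0 and 1.25 times the timeout, as the comment above states. A small simulation of that bound, with plain integers standing in for jiffies:

#include <stdio.h>

#define WD_TICK(timeout) ((timeout) / 4)

int main(void)
{
    const int timeout = 2000;          /* ms, e.g. base_params->wd_timeout */
    const int tick    = WD_TICK(timeout);

    /* Sweep the point within a tick period at which the queue hangs. */
    for (int hang_at = 0; hang_at < tick; hang_at += 100) {
        int t = 0;
        while (t < hang_at + timeout)  /* earlier ticks still saw progress */
            t += tick;
        /* t is the first tick at or past hang_at + timeout => detection time. */
        printf("hang at %4d ms -> detected at %4d ms (%.2fx timeout)\n",
               hang_at, t, (double)(t - hang_at) / timeout);
    }
    return 0;
}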
+ */ +void +il_bg_watchdog(unsigned long data) +{ + struct il_priv *il = (struct il_priv *)data; + int cnt; + unsigned long timeout; + + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + timeout = il->cfg->base_params->wd_timeout; + if (timeout == 0) + return; + + /* monitor and check for stuck cmd queue */ + if (il_check_stuck_queue(il, il->cmd_queue)) + return; + + /* monitor and check for other stuck queues */ + if (il_is_any_associated(il)) { + for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { + /* skip as we already checked the command queue */ + if (cnt == il->cmd_queue) + continue; + if (il_check_stuck_queue(il, cnt)) + return; + } + } + + mod_timer(&il->watchdog, + jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); +} +EXPORT_SYMBOL(il_bg_watchdog); + +void +il_setup_watchdog(struct il_priv *il) +{ + unsigned int timeout = il->cfg->base_params->wd_timeout; + + if (timeout) + mod_timer(&il->watchdog, + jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); + else + del_timer(&il->watchdog); +} +EXPORT_SYMBOL(il_setup_watchdog); + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +u32 +il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = + (usec / + interval) & (il_beacon_time_mask_high(il, + il->hw_params. + beacon_time_tsf_bits) >> il-> + hw_params.beacon_time_tsf_bits); + rem = + (usec % interval) & il_beacon_time_mask_low(il, + il->hw_params. + beacon_time_tsf_bits); + + return (quot << il->hw_params.beacon_time_tsf_bits) + rem; +} +EXPORT_SYMBOL(il_usecs_to_beacons); + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +__le32 +il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, + u32 beacon_interval) +{ + u32 base_low = base & il_beacon_time_mask_low(il, + il->hw_params. + beacon_time_tsf_bits); + u32 addon_low = addon & il_beacon_time_mask_low(il, + il->hw_params. + beacon_time_tsf_bits); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & il_beacon_time_mask_high(il, + il->hw_params. + beacon_time_tsf_bits)) + + (addon & il_beacon_time_mask_high(il, + il->hw_params. + beacon_time_tsf_bits)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << il->hw_params.beacon_time_tsf_bits); + } else + res += (1 << il->hw_params.beacon_time_tsf_bits); + + return cpu_to_le32(res); +} +EXPORT_SYMBOL(il_add_beacon_time); + +#ifdef CONFIG_PM + +int +il_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct il_priv *il = pci_get_drvdata(pdev); + + /* + * This function is called when system goes into suspend state + * mac80211 will call il_mac_stop() from the mac80211 suspend function + * first but since il_mac_stop() has no knowledge of who the caller is, + * it will not call apm_ops.stop() to stop the DMA operation. + * Calling apm_ops.stop here to make sure we stop the DMA. 
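[Editor's note] il_usecs_to_beacons() packs a time into the "extended" beacon format described above: the low beacon_time_tsf_bits bits carry the microseconds elapsed within the current beacon interval, and the bits above carry the whole-interval (beacon) count. A self-contained version; the 22-bit low-field width and the mask derivation are assumptions chosen for illustration, not the hardware's actual field sizes.

#include <stdint.h>
#include <stdio.h>

#define TSF_BITS   22u                        /* illustrative field width */
#define TIME_UNIT  1024u                      /* one TU in microseconds   */
#define LOW_MASK   ((1u << TSF_BITS) - 1)

/* usec -> (beacon count << TSF_BITS) | usec-within-interval */
static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu)
{
    uint32_t interval = beacon_interval_tu * TIME_UNIT;

    if (!interval || !usec)
        return 0;

    uint32_t quot = (usec / interval) & (~LOW_MASK >> TSF_BITS);
    uint32_t rem  = (usec % interval) & LOW_MASK;

    return (quot << TSF_BITS) + rem;
}

int main(void)
{
    /* 2.5 beacon intervals at the default 100 TU interval. */
    uint32_t v = usecs_to_beacons(256000, 100);

    printf("beacons=%u, usec into interval=%u\n", v >> TSF_BITS, v & LOW_MASK);
    return 0;
}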
+ */ + il_apm_stop(il); + + return 0; +} +EXPORT_SYMBOL(il_pci_suspend); + +int +il_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct il_priv *il = pci_get_drvdata(pdev); + bool hw_rfkill = false; + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + il_enable_interrupts(il); + + if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rfkill = true; + + if (hw_rfkill) + set_bit(S_RF_KILL_HW, &il->status); + else + clear_bit(S_RF_KILL_HW, &il->status); + + wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill); + + return 0; +} +EXPORT_SYMBOL(il_pci_resume); + +const struct dev_pm_ops il_pm_ops = { + .suspend = il_pci_suspend, + .resume = il_pci_resume, + .freeze = il_pci_suspend, + .thaw = il_pci_resume, + .poweroff = il_pci_suspend, + .restore = il_pci_resume, +}; +EXPORT_SYMBOL(il_pm_ops); + +#endif /* CONFIG_PM */ + +static void +il_update_qos(struct il_priv *il, struct il_rxon_context *ctx) +{ + if (test_bit(S_EXIT_PENDING, &il->status)) + return; + + if (!ctx->is_active) + return; + + ctx->qos_data.def_qos_parm.qos_flags = 0; + + if (ctx->qos_data.qos_active) + ctx->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + if (ctx->ht.enabled) + ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + + D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n", + ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags); + + il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd), + &ctx->qos_data.def_qos_parm, NULL); +} + +/** + * il_mac_config - mac80211 config callback + */ +int +il_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct il_priv *il = hw->priv; + const struct il_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = conf->channel; + struct il_ht_config *ht_conf = &il->current_ht_config; + struct il_rxon_context *ctx = &il->ctx; + unsigned long flags = 0; + int ret = 0; + u16 ch; + int scan_active = 0; + bool ht_changed = false; + + if (WARN_ON(!il->cfg->ops->legacy)) + return -EOPNOTSUPP; + + mutex_lock(&il->mutex); + + D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value, + changed); + + if (unlikely(test_bit(S_SCANNING, &il->status))) { + scan_active = 1; + D_MAC80211("scan active\n"); + } + + if (changed & + (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) { + /* mac80211 uses static for non-HT which is what we want */ + il->current_ht_config.smps = conf->smps_mode; + + /* + * Recalculate chain counts. + * + * If monitor mode is enabled then mac80211 will + * set up the SM PS mode to OFF if an HT channel is + * configured. 
+ */ + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx); + } + + /* during scanning mac80211 will delay channel setting until + * scan finish with changed = 0 + */ + if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + + if (scan_active) + goto set_ch_out; + + ch = channel->hw_value; + ch_info = il_get_channel_info(il, channel->band, ch); + if (!il_is_channel_valid(ch_info)) { + D_MAC80211("leave - invalid channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + if (il->iw_mode == NL80211_IFTYPE_ADHOC && + !il_is_channel_ibss(ch_info)) { + D_MAC80211("leave - not IBSS channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + spin_lock_irqsave(&il->lock, flags); + + /* Configure HT40 channels */ + if (ctx->ht.enabled != conf_is_ht(conf)) { + ctx->ht.enabled = conf_is_ht(conf); + ht_changed = true; + } + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + /* + * Default to no protection. Protection mode will + * later be set from BSS config in il_ht_conf + */ + ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + il_set_rxon_channel(il, channel, ctx); + il_set_rxon_ht(il, ht_conf); + + il_set_flags_for_band(il, ctx, channel->band, ctx->vif); + + spin_unlock_irqrestore(&il->lock, flags); + + if (il->cfg->ops->legacy->update_bcast_stations) + ret = il->cfg->ops->legacy->update_bcast_stations(il); + +set_ch_out: + /* The list of supported rates and rate mask can be different + * for each band; since the band may have changed, reset + * the rate mask to what mac80211 lists */ + il_set_rate(il); + } + + if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { + ret = il_power_update_mode(il, false); + if (ret) + D_MAC80211("Error setting sleep level\n"); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt, + conf->power_level); + + il_set_tx_power(il, conf->power_level, false); + } + + if (!il_is_ready(il)) { + D_MAC80211("leave - not ready\n"); + goto out; + } + + if (scan_active) + goto out; + + if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) + il_commit_rxon(il, ctx); + else + D_INFO("Not re-sending same RXON configuration.\n"); + if (ht_changed) + il_update_qos(il, ctx); + +out: + D_MAC80211("leave\n"); + mutex_unlock(&il->mutex); + return ret; +} +EXPORT_SYMBOL(il_mac_config); + +void +il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct il_priv *il = hw->priv; + unsigned long flags; + struct il_rxon_context *ctx = &il->ctx; + + if (WARN_ON(!il->cfg->ops->legacy)) + return; + + mutex_lock(&il->mutex); + D_MAC80211("enter\n"); + + spin_lock_irqsave(&il->lock, flags); + memset(&il->current_ht_config, 0, sizeof(struct il_ht_config)); + spin_unlock_irqrestore(&il->lock, flags); + + spin_lock_irqsave(&il->lock, flags); + + /* new association get rid of ibss beacon skb */ + if (il->beacon_skb) + dev_kfree_skb(il->beacon_skb); + + 
il->beacon_skb = NULL; + + il->timestamp = 0; + + spin_unlock_irqrestore(&il->lock, flags); + + il_scan_cancel_timeout(il, 100); + if (!il_is_ready_rf(il)) { + D_MAC80211("leave - not ready\n"); + mutex_unlock(&il->mutex); + return; + } + + /* we are restarting association process + * clear RXON_FILTER_ASSOC_MSK bit + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + il_commit_rxon(il, ctx); + + il_set_rate(il); + + mutex_unlock(&il->mutex); + + D_MAC80211("leave\n"); +} +EXPORT_SYMBOL(il_mac_reset_tsf); + +static void +il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif) +{ + struct il_ht_config *ht_conf = &il->current_ht_config; + struct ieee80211_sta *sta; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); + + D_ASSOC("enter:\n"); + + if (!ctx->ht.enabled) + return; + + ctx->ht.protection = + bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; + ctx->ht.non_gf_sta_present = + !!(bss_conf-> + ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + + ht_conf->single_chain_sufficient = false; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (sta) { + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int maxstreams; + + maxstreams = + (ht_cap->mcs. + tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if (ht_cap->mcs.rx_mask[1] == 0 && + ht_cap->mcs.rx_mask[2] == 0) + ht_conf->single_chain_sufficient = true; + if (maxstreams <= 1) + ht_conf->single_chain_sufficient = true; + } else { + /* + * If at all, this can only happen through a race + * when the AP disconnects us while we're still + * setting up the connection, in that case mac80211 + * will soon tell us about that. 
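[Editor's note] The single-chain decision in il_ht_conf() boils down to two peer-capability checks: the peer's RX MCS mask shows nothing beyond the first eight MCS indices (rx_mask[1] and rx_mask[2] empty), or the MAX_STREAMS field of tx_params allows only one TX stream (the field is zero-based, hence the +1). The mask and shift below follow the 802.11n MCS-set layout but should be read as illustrative stand-ins for the IEEE80211_HT_MCS_* constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* tx_params byte of the HT cap MCS set: bits 2..3 = max TX streams, zero-based. */
#define HT_MCS_TX_MAX_STREAMS_MASK  0x0C
#define HT_MCS_TX_MAX_STREAMS_SHIFT 2

struct ht_mcs_info {
    uint8_t rx_mask[10];   /* bitmap of supported RX MCS indices */
    uint8_t tx_params;
};

static bool single_chain_sufficient(const struct ht_mcs_info *mcs)
{
    int maxstreams = ((mcs->tx_params & HT_MCS_TX_MAX_STREAMS_MASK)
                      >> HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;

    /* Peer only receives MCS 0-7, or only transmits one stream. */
    if (mcs->rx_mask[1] == 0 && mcs->rx_mask[2] == 0)
        return true;
    return maxstreams <= 1;
}

int main(void)
{
    struct ht_mcs_info one_stream = { .rx_mask = { 0xff }, .tx_params = 0x00 };
    struct ht_mcs_info two_stream = { .rx_mask = { 0xff, 0xff }, .tx_params = 0x04 };

    printf("%d %d\n", single_chain_sufficient(&one_stream),
           single_chain_sufficient(&two_stream));   /* prints: 1 0 */
    return 0;
}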
+ */ + ht_conf->single_chain_sufficient = true; + } + rcu_read_unlock(); + break; + case NL80211_IFTYPE_ADHOC: + ht_conf->single_chain_sufficient = true; + break; + default: + break; + } + + D_ASSOC("leave\n"); +} + +static inline void +il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif) +{ + struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); + + /* + * inform the ucode that there is no longer an + * association and that no more packets should be + * sent + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ctx->staging.assoc_id = 0; + il_commit_rxon(il, ctx); +} + +static void +il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct il_priv *il = hw->priv; + unsigned long flags; + __le64 timestamp; + struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + + if (!skb) + return; + + D_MAC80211("enter\n"); + + lockdep_assert_held(&il->mutex); + + if (!il->beacon_ctx) { + IL_ERR("update beacon but no beacon context!\n"); + dev_kfree_skb(skb); + return; + } + + spin_lock_irqsave(&il->lock, flags); + + if (il->beacon_skb) + dev_kfree_skb(il->beacon_skb); + + il->beacon_skb = skb; + + timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + il->timestamp = le64_to_cpu(timestamp); + + D_MAC80211("leave\n"); + spin_unlock_irqrestore(&il->lock, flags); + + if (!il_is_ready_rf(il)) { + D_MAC80211("leave - RF not ready\n"); + return; + } + + il->cfg->ops->legacy->post_associate(il); +} + +void +il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, u32 changes) +{ + struct il_priv *il = hw->priv; + struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); + int ret; + + if (WARN_ON(!il->cfg->ops->legacy)) + return; + + D_MAC80211("changes = 0x%X\n", changes); + + mutex_lock(&il->mutex); + + if (!il_is_alive(il)) { + mutex_unlock(&il->mutex); + return; + } + + if (changes & BSS_CHANGED_QOS) { + unsigned long flags; + + spin_lock_irqsave(&il->lock, flags); + ctx->qos_data.qos_active = bss_conf->qos; + il_update_qos(il, ctx); + spin_unlock_irqrestore(&il->lock, flags); + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + /* + * the add_interface code must make sure we only ever + * have a single interface that could be beaconing at + * any time. + */ + if (vif->bss_conf.enable_beacon) + il->beacon_ctx = ctx; + else + il->beacon_ctx = NULL; + } + + if (changes & BSS_CHANGED_BSSID) { + D_MAC80211("BSSID %pM\n", bss_conf->bssid); + + /* + * If there is currently a HW scan going on in the + * background then we need to cancel it else the RXON + * below/in post_associate will fail. + */ + if (il_scan_cancel_timeout(il, 100)) { + IL_WARN("Aborted scan still in progress after 100ms\n"); + D_MAC80211("leaving - scan abort failed.\n"); + mutex_unlock(&il->mutex); + return; + } + + /* mac80211 only sets assoc when in STATION mode */ + if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, + ETH_ALEN); + + /* currently needed in a few places */ + memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); + } else { + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } + + } + + /* + * This needs to be after setting the BSSID in case + * mac80211 decides to do both changes at once because + * it will invoke post_associate. 
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) + il_beacon_update(hw, vif); + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + } + + if (changes & BSS_CHANGED_ERP_CTS_PROT) { + D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ) + ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + } + + if (changes & BSS_CHANGED_BASIC_RATES) { + /* XXX use this information + * + * To do that, remove code from il_set_rate() and put something + * like this here: + * + if (A-band) + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates; + else + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates >> 4; + ctx->staging.cck_basic_rates = + bss_conf->basic_rates & 0xF; + */ + } + + if (changes & BSS_CHANGED_HT) { + il_ht_conf(il, vif); + + if (il->cfg->ops->hcmd->set_rxon_chain) + il->cfg->ops->hcmd->set_rxon_chain(il, ctx); + } + + if (changes & BSS_CHANGED_ASSOC) { + D_MAC80211("ASSOC %d\n", bss_conf->assoc); + if (bss_conf->assoc) { + il->timestamp = bss_conf->timestamp; + + if (!il_is_rfkill(il)) + il->cfg->ops->legacy->post_associate(il); + } else + il_set_no_assoc(il, vif); + } + + if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) { + D_MAC80211("Changes (%#x) while associated\n", changes); + ret = il_send_rxon_assoc(il, ctx); + if (!ret) { + /* Sync active_rxon with latest change. */ + memcpy((void *)&ctx->active, &ctx->staging, + sizeof(struct il_rxon_cmd)); + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + if (vif->bss_conf.enable_beacon) { + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, + ETH_ALEN); + memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); + il->cfg->ops->legacy->config_ap(il); + } else + il_set_no_assoc(il, vif); + } + + if (changes & BSS_CHANGED_IBSS) { + ret = + il->cfg->ops->legacy->manage_ibss_station(il, vif, + bss_conf-> + ibss_joined); + if (ret) + IL_ERR("failed to %s IBSS station %pM\n", + bss_conf->ibss_joined ? "add" : "remove", + bss_conf->bssid); + } + + mutex_unlock(&il->mutex); + + D_MAC80211("leave\n"); +} +EXPORT_SYMBOL(il_mac_bss_info_changed); + +irqreturn_t +il_isr(int irq, void *data) +{ + struct il_priv *il = data; + u32 inta, inta_mask; + u32 inta_fh; + unsigned long flags; + if (!il) + return IRQ_NONE; + + spin_lock_irqsave(&il->lock, flags); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */ + _il_wr(il, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = _il_rd(il, CSR_INT); + inta_fh = _il_rd(il, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. 
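[Editor's note] The XXX comment in the BSS_CHANGED_BASIC_RATES branch above sketches how bss_conf->basic_rates could feed the RXON rate fields directly: on 2.4 GHz the first four bits of mac80211's basic-rate bitmap correspond to the CCK rates and the remainder to OFDM, while on 5 GHz the whole bitmap is OFDM. A literal rendering of that sketch, offered only as an illustration since the driver currently leaves il_set_rate() in charge:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct basic_rates {
    uint8_t cck;    /* relative to the first CCK rate  */
    uint8_t ofdm;   /* relative to the first OFDM rate */
};

/* mac80211's basic_rates bitmap is indexed by the band's bitrate table;
 * on 2.4 GHz the first 4 entries are CCK, the remainder OFDM. */
static struct basic_rates split_basic_rates(uint32_t basic_rates, bool band_5ghz)
{
    struct basic_rates r = { 0, 0 };

    if (band_5ghz) {
        r.ofdm = basic_rates & 0xFF;
    } else {
        r.cck  = basic_rates & 0xF;
        r.ofdm = (basic_rates >> 4) & 0xFF;
    }
    return r;
}

int main(void)
{
    /* 1, 2, 5.5, 11 Mbps CCK basic plus 6, 12, 24 Mbps OFDM basic on 2.4 GHz. */
    struct basic_rates r = split_basic_rates(0x15F, false);

    printf("cck=0x%x ofdm=0x%x\n", r.cck, r.ofdm);
    return 0;
}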
*/ + if (!inta && !inta_fh) { + D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) { + /* Hardware disappeared. It might have already raised + * an interrupt */ + IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta); + goto unplugged; + } + + D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask, + inta_fh); + + inta &= ~CSR_INT_BIT_SCD; + + /* il_irq_tasklet() will service interrupts and re-enable them */ + if (likely(inta || inta_fh)) + tasklet_schedule(&il->irq_tasklet); + +unplugged: + spin_unlock_irqrestore(&il->lock, flags); + return IRQ_HANDLED; + +none: + /* re-enable interrupts here since we don't have anything to service. */ + /* only Re-enable if disabled by irq */ + if (test_bit(S_INT_ENABLED, &il->status)) + il_enable_interrupts(il); + spin_unlock_irqrestore(&il->lock, flags); + return IRQ_NONE; +} +EXPORT_SYMBOL(il_isr); + +/* + * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this + * function. + */ +void +il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) +{ + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + *tx_flags |= TX_CMD_FLG_RTS_MSK; + *tx_flags &= ~TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + if (!ieee80211_is_mgmt(fc)) + return; + + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + break; + } + } else if (info->control.rates[0]. + flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + } +} +EXPORT_SYMBOL(il_tx_cmd_protection); diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h new file mode 100644 index 00000000000..abfa388588b --- /dev/null +++ b/drivers/net/wireless/iwlegacy/common.h @@ -0,0 +1,3246 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __il_core_h__ +#define __il_core_h__ + +#include <linux/interrupt.h> +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <linux/leds.h> +#include <linux/wait.h> +#include <linux/io.h> +#include <net/mac80211.h> +#include <net/ieee80211_radiotap.h> + +#include "commands.h" +#include "csr.h" +#include "prph.h" + +struct il_host_cmd; +struct il_cmd; +struct il_tx_queue; + +#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a) +#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a) +#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a) + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +#define U32_PAD(n) ((4-(n))&0x3) + +/* CT-KILL constants */ +#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon stats being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct il_rx_buf { + dma_addr_t page_dma; + struct page *page; + struct list_head list; +}; + +#define rxb_addr(r) page_address(r->page) + +/* defined below */ +struct il_device_cmd; + +struct il_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct il_host_cmd *source; + /* + * only for ASYNC commands + * (which is somewhat stupid -- look at common.c for instance + * which duplicates a bunch of code because the callback isn't + * invoked for SYNC commands, if it were and its result passed + * through it would be simpler...) + */ + void (*callback) (struct il_priv *il, struct il_device_cmd *cmd, + struct il_rx_pkt *pkt); + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. 
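[Editor's note] The U32_PAD(n) macro defined below gives the number of filler bytes needed to round a length n up to the next 32-bit boundary; the "& 0x3" is what keeps already-aligned lengths from producing a pad of 4. A quick check of the arithmetic:

#include <stdio.h>

#define U32_PAD(n) ((4 - (n)) & 0x3)

int main(void)
{
    for (int n = 0; n <= 8; n++)
        printf("len %d -> pad %d -> total %d\n", n, U32_PAD(n), n + U32_PAD(n));
    return 0;
}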
*/ + u32 flags; + + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct il_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (idx) host_w */ + int read_ptr; /* last used entry (idx) host_r */ + /* use for monitoring and recovering the stuck queue */ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_win; /* safe queue win */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +}; + +/* One for each TFD */ +struct il_tx_info { + struct sk_buff *skb; + struct il_rxon_context *ctx; +}; + +/** + * struct il_tx_queue - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @bd: base of circular buffer of TFDs + * @cmd: array of command/TX buffer pointers + * @meta: array of meta data for each command/tx buffer + * @dma_addr_cmd: physical address of cmd/tx buffer array + * @txb: array of per-TFD driver data + * @time_stamp: time (in jiffies) of last read_ptr change + * @need_update: indicates need to update read/write idx + * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled + * + * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. + */ +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +struct il_tx_queue { + struct il_queue q; + void *tfds; + struct il_device_cmd **cmd; + struct il_cmd_meta *meta; + struct il_tx_info *txb; + unsigned long time_stamp; + u8 need_update; + u8 sched_retry; + u8 active; + u8 swq_id; +}; + +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ + +#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +/* + * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags. + * + * IBSS and/or AP operation is allowed *only* on those channels with + * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because + * RADAR detection is not supported by the 4965 driver, but is a + * requirement for establishing a new network for legal operation on channels + * requiring RADAR detection or restricting ACTIVE scanning. + * + * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. + * It only indicates that 20 MHz channel use is supported; HT40 channel + * usage is indicated by a separate set of regulatory flags for each + * HT40 channel pair. + * + * NOTE: Using a channel inappropriately will result in a uCode error! 
+ */ +#define IL_NUM_TX_CALIB_GROUPS 5 +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ + /* Bit 6 Reserved (was Narrow Channel) */ + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* SKU Capabilities */ +/* 3945 only */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) + +/* *regulatory* channel data format in eeprom, one for each channel. + * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */ +struct il_eeprom_channel { + u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __packed; + +/* 3945 Specific */ +#define EEPROM_3945_EEPROM_VERSION (0x2f) + +/* 4965 has two radio transmitters (and 3 radio receivers) */ +#define EEPROM_TX_POWER_TX_CHAINS (2) + +/* 4965 has room for up to 8 sets of txpower calibration data */ +#define EEPROM_TX_POWER_BANDS (8) + +/* 4965 factory calibration measures txpower gain settings for + * each of 3 target output levels */ +#define EEPROM_TX_POWER_MEASUREMENTS (3) + +/* 4965 Specific */ +/* 4965 driver does not work with txpower calibration version < 5 */ +#define EEPROM_4965_TX_POWER_VERSION (5) +#define EEPROM_4965_EEPROM_VERSION (0x2f) +#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ +#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + +/* 2.4 GHz */ +extern const u8 il_eeprom_band_1[14]; + +/* + * factory calibration data for one txpower level, on one channel, + * measured on one of the 2 tx chains (radio transmitter and associated + * antenna). EEPROM contains: + * + * 1) Temperature (degrees Celsius) of device when measurement was made. + * + * 2) Gain table idx used to achieve the target measurement power. + * This refers to the "well-known" gain tables (see 4965.h). + * + * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). + * + * 4) RF power amplifier detector level measurement (not used). + */ +struct il_eeprom_calib_measure { + u8 temperature; /* Device temperature (Celsius) */ + u8 gain_idx; /* Index into gain table */ + u8 actual_pow; /* Measured RF output power, half-dBm */ + s8 pa_det; /* Power amp detector level (not used) */ +} __packed; + +/* + * measurement set for one channel. EEPROM contains: + * + * 1) Channel number measured + * + * 2) Measurements for each of 3 power levels for each of 2 radio transmitters + * (a.k.a. "tx chains") (6 measurements altogether) + */ +struct il_eeprom_calib_ch_info { + u8 ch_num; + struct il_eeprom_calib_measure + measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __packed; + +/* + * txpower subband info. + * + * For each frequency subband, EEPROM contains the following: + * + * 1) First and last channels within range of the subband. "0" values + * indicate that this sample set is not being used. + * + * 2) Sample measurement sets for 2 channels close to the range endpoints. 
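[Editor's note] The regulatory comment above reduces to a one-line predicate over the per-channel EEPROM flags: a channel may host an IBSS/AP only when it is VALID, IBSS-capable, ACTIVE-scan allowed, and not RADAR-flagged. Expressed directly with the enum bit values from this header (only the bits used here are reproduced):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
    EEPROM_CHANNEL_VALID  = 1 << 0,
    EEPROM_CHANNEL_IBSS   = 1 << 1,
    EEPROM_CHANNEL_ACTIVE = 1 << 3,
    EEPROM_CHANNEL_RADAR  = 1 << 4,
};

static bool channel_allows_ibss(uint8_t flags)
{
    return (flags & EEPROM_CHANNEL_VALID) &&
           (flags & EEPROM_CHANNEL_IBSS)  &&
           (flags & EEPROM_CHANNEL_ACTIVE) &&
           !(flags & EEPROM_CHANNEL_RADAR);
}

int main(void)
{
    uint8_t ch1  = EEPROM_CHANNEL_VALID | EEPROM_CHANNEL_IBSS | EEPROM_CHANNEL_ACTIVE;
    uint8_t ch52 = ch1 | EEPROM_CHANNEL_RADAR;   /* radar-detection channel */

    printf("ch1: %d  ch52: %d\n", channel_allows_ibss(ch1),
           channel_allows_ibss(ch52));            /* prints: 1 0 */
    return 0;
}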
+ */ +struct il_eeprom_calib_subband_info { + u8 ch_from; /* channel number of lowest channel in subband */ + u8 ch_to; /* channel number of highest channel in subband */ + struct il_eeprom_calib_ch_info ch1; + struct il_eeprom_calib_ch_info ch2; +} __packed; + +/* + * txpower calibration info. EEPROM contains: + * + * 1) Factory-measured saturation power levels (maximum levels at which + * tx power amplifier can output a signal without too much distortion). + * There is one level for 2.4 GHz band and one for 5 GHz band. These + * values apply to all channels within each of the bands. + * + * 2) Factory-measured power supply voltage level. This is assumed to be + * constant (i.e. same value applies to all channels/bands) while the + * factory measurements are being made. + * + * 3) Up to 8 sets of factory-measured txpower calibration values. + * These are for different frequency ranges, since txpower gain + * characteristics of the analog radio circuitry vary with frequency. + * + * Not all sets need to be filled with data; + * struct il_eeprom_calib_subband_info contains range of channels + * (0 if unused) for each set of data. + */ +struct il_eeprom_calib_info { + u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ + u8 saturation_power52; /* half-dBm */ + __le16 voltage; /* signed */ + struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS]; +} __packed; + +/* General */ +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ +#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ +#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ +#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ + +/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ +#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ +#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 +#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by iwl has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) + * channels (only for 4965, not supported by 3945) appear later in the EEPROM. 
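[Editor's note] The EEPROM_RF_CFG_*_MSK() helpers defined further down simply slice the 16-bit EEPROM_RADIO_CONFIG word into fields: type, step and dash in bits 0-5, part number in bits 6-7, and the TX/RX antenna bitmaps in bits 8-11 and 12-15. A short decode using the same shifts and masks; the config word itself is made up, not taken from a real EEPROM dump.

#include <stdint.h>
#include <stdio.h>

#define RF_CFG_TYPE(x)    ((x) & 0x3)           /* bits 0-1   */
#define RF_CFG_STEP(x)    (((x) >> 2) & 0x3)    /* bits 2-3   */
#define RF_CFG_DASH(x)    (((x) >> 4) & 0x3)    /* bits 4-5   */
#define RF_CFG_PNUM(x)    (((x) >> 6) & 0x3)    /* bits 6-7   */
#define RF_CFG_TX_ANT(x)  (((x) >> 8) & 0xF)    /* bits 8-11  */
#define RF_CFG_RX_ANT(x)  (((x) >> 12) & 0xF)   /* bits 12-15 */

int main(void)
{
    uint16_t cfg = 0x3301;   /* made-up radio-config word */

    printf("type=%u step=%u dash=%u pnum=%u tx_ant=0x%x rx_ant=0x%x\n",
           RF_CFG_TYPE(cfg), RF_CFG_STEP(cfg), RF_CFG_DASH(cfg),
           RF_CFG_PNUM(cfg), RF_CFG_TX_ANT(cfg), RF_CFG_RX_ANT(cfg));
    return 0;
}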
+ * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + +/* + * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) + * + * The channel listed is the center of the lower 20 MHz half of the channel. + * The overall center frequency is actually 2 channels (10 MHz) above that, + * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away + * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, + * and the overall HT40 channel width centers on channel 3. + * + * NOTE: The RXON command uses 20 MHz channel numbers to specify the + * control channel to which to tune. RXON also specifies whether the + * control channel is the upper or lower half of a HT40 channel. + * + * NOTE: 4965 does not support HT40 channels on 2.4 GHz. + */ +#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ + +/* + * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), + * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) + */ +#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ + +#define EEPROM_REGULATORY_BAND_NO_HT40 (0) + +struct il_eeprom_ops { + const u32 regulatory_bands[7]; + int (*acquire_semaphore) (struct il_priv *il); + void (*release_semaphore) (struct il_priv *il); +}; + +int il_eeprom_init(struct il_priv *il); +void il_eeprom_free(struct il_priv *il); +const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset); +u16 il_eeprom_query16(const struct il_priv *il, size_t offset); +int il_init_channel_map(struct il_priv *il); +void il_free_channel_map(struct il_priv *il); +const struct il_channel_info *il_get_channel_info(const struct il_priv *il, + enum ieee80211_band band, + u16 channel); + +#define IL_NUM_SCAN_RATES (2) + +struct il4965_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct il4965_channel_tgh_info { + s64 last_radar_time; +}; + +#define IL4965_MAX_RATE (33) + +struct il3945_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IL_MAX_RATES]; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. 
iwconfig) + * when requested power is set, base power idx must also be set. */ +struct il3945_channel_power_info { + struct il3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_idx; /* actual (compenst'd) idx into gain table */ + s8 base_power_idx; /* gain idx for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct il3945_scan_power_info { + struct il3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_idx; /* actual (compenst'd) idx into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! + */ +struct il_channel_info { + struct il4965_channel_tgd_info tgd; + struct il4965_channel_tgh_info tgh; + struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for + * HT40 channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */ + enum ieee80211_band band; + + /* HT40 channel info */ + s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + u8 ht40_flags; /* flags copied from EEPROM */ + u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (idxes). */ + struct il3945_channel_power_info power_info[IL4965_MAX_RATE]; + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES]; +}; + +#define IL_TX_FIFO_BK 0 /* shared */ +#define IL_TX_FIFO_BE 1 +#define IL_TX_FIFO_VI 2 /* shared */ +#define IL_TX_FIFO_VO 3 +#define IL_TX_FIFO_UNUSED -1 + +/* Minimum number of queues. MAX_NUM is defined in hw specific files. 
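For the per-channel power bookkeeping just defined (struct il_channel_info and its power_info entries), the listed limits combine roughly as in this sketch; the helper and parameter names are illustrative, not from the patch:

static s8
example_limit_requested_power(const struct il_channel_info *ch_info,
                              s8 clip_pwr, s8 user_pwr)
{
        s8 pwr = user_pwr;

        if (pwr > ch_info->max_power_avg)       /* regulatory EEPROM limit */
                pwr = ch_info->max_power_avg;
        if (pwr > clip_pwr)                     /* hardware clip-power limit */
                pwr = clip_pwr;

        return pwr;
}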
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ +#define IL_MIN_NUM_QUEUES 10 + +#define IL_DEFAULT_CMD_QUEUE_NUM 4 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct il_frame { + union { + struct ieee80211_hdr frame; + struct il_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + CMD_SYNC = 0, + CMD_SIZE_NORMAL = 0, + CMD_NO_SKB = 0, + CMD_SIZE_HUGE = (1 << 0), + CMD_ASYNC = (1 << 1), + CMD_WANT_SKB = (1 << 2), + CMD_MAPPED = (1 << 3), +}; + +#define DEF_CMD_PAYLOAD_SIZE 320 + +/** + * struct il_device_cmd + * + * For allocation of the command and tx queues, this establishes the overall + * size of the largest command we send to uCode, except for a scan command + * (which is relatively huge; space is allocated separately). + */ +struct il_device_cmd { + struct il_cmd_header hdr; /* uCode API */ + union { + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct il_tx_cmd tx; + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + } __packed cmd; +} __packed; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd)) + +struct il_host_cmd { + const void *data; + unsigned long reply_page; + void (*callback) (struct il_priv *il, struct il_device_cmd *cmd, + struct il_rx_pkt *pkt); + u32 flags; + u16 len; + u8 id; +}; + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct il_rx_queue - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @read: Shared idx to newest available Rx buffer + * @write: Shared idx to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write idx + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * + * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs + */ +struct il_rx_queue { + __le32 *bd; + dma_addr_t bd_dma; + struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct il_rx_buf *queue[RX_QUEUE_SIZE]; + u32 read; + u32 write; + u32 free_count; + u32 write_actual; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + struct il_rb_status *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; +}; + +#define IL_SUPPORTED_RATES_IE_LEN 8 + +#define MAX_TID_COUNT 9 + +#define IL_INVALID_RATE 0xFF +#define IL_INVALID_VALUE -1 + +/** + * struct il_ht_agg -- aggregation status while waiting for block-ack + * @txq_id: Tx queue used for Tx attempt + * @frame_count: # frames attempted by Tx command + * @wait_for_ba: Expect block-ack before next Tx reply + * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win + * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx win + * @bitmap1: High order, one bit for each frame pending ACK in Tx win + * @rate_n_flags: Rate at which Tx was attempted + * + * If C_TX indicates that aggregation was attempted, 
driver must wait + * for block ack (N_COMPRESSED_BA). This struct stores tx reply info + * until block ack arrives. + */ +struct il_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u64 bitmap; + u32 rate_n_flags; +#define IL_AGG_OFF 0 +#define IL_AGG_ON 1 +#define IL_EMPTYING_HW_QUEUE_ADDBA 2 +#define IL_EMPTYING_HW_QUEUE_DELBA 3 + u8 state; +}; + +struct il_tid_data { + u16 seq_number; /* 4965 only */ + u16 tfds_in_queue; + struct il_ht_agg agg; +}; + +struct il_hw_key { + u32 cipher; + int keylen; + u8 keyidx; + u8 key[32]; +}; + +union il_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0) +#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1) +#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2) +#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3) +#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K + +/* + * Maximal MPDU density for TX aggregation + * 4 - 2us density + * 5 - 4us density + * 6 - 8us density + * 7 - 16us density + */ +#define CFG_HT_MPDU_DENSITY_2USEC (0x4) +#define CFG_HT_MPDU_DENSITY_4USEC (0x5) +#define CFG_HT_MPDU_DENSITY_8USEC (0x6) +#define CFG_HT_MPDU_DENSITY_16USEC (0x7) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC +#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC +#define CFG_HT_MPDU_DENSITY_MIN (0x1) + +struct il_ht_config { + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; /* current smps mode */ +}; + +/* QoS structures */ +struct il_qos_info { + int qos_active; + struct il_qosparam_cmd def_qos_parm; +}; + +/* + * Structure should be accessed with sta_lock held. When station addition + * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only + * the commands (il_addsta_cmd and il_link_quality_cmd) without + * sta_lock held. + */ +struct il_station_entry { + struct il_addsta_cmd sta; + struct il_tid_data tid[MAX_TID_COUNT]; + u8 used, ctxid; + struct il_hw_key keyinfo; + struct il_link_quality_cmd *lq; +}; + +struct il_station_priv_common { + struct il_rxon_context *ctx; + u8 sta_id; +}; + +/** + * struct il_vif_priv - driver's ilate per-interface information + * + * When mac80211 allocates a virtual interface, it can allocate + * space for us to put data into. 
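For illustration of the SEQ_TO_SN()/SN_TO_SEQ() helpers defined earlier, which convert between the 802.11 sequence-control field and the plain sequence numbers the aggregation state above tracks (hypothetical helper name, not part of the patch):

static u16
example_frame_sn(const struct ieee80211_hdr *hdr)
{
        /* seq_ctrl is little-endian; SEQ_TO_SN drops the fragment bits,
         * leaving a sequence number in the range 0..MAX_SN */
        return SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
}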
+ */ +struct il_vif_priv { + struct il_rxon_context *ctx; + u8 ibss_bssid_sta_id; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct il_ucode_header { + __le32 ver; /* major/minor/API/serial */ + struct { + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v1; +}; + +struct il4965_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct il_sensitivity_ranges { + u16 min_nrg_cck; + u16 max_nrg_cck; + + u16 nrg_th_cck; + u16 nrg_th_ofdm; + + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + +/** + * struct il_hw_params + * @max_txq_num: Max # Tx queues supported + * @dma_chnl_num: Number of Tx DMA/FIFO channels + * @scd_bc_tbls_size: size of scheduler byte count tables + * @tfd_size: TFD size + * @tx/rx_chains_num: Number of TX/RX chains + * @valid_tx/rx_ant: usable antennas + * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) + * @max_rxq_log: Log-base-2 of max_rxq_size + * @rx_page_order: Rx buffer page order + * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR + * @max_stations: + * @ht40_channel: is 40MHz width possible in band 2.4 + * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) + * @sw_crypto: 0 for hw, 1 for sw + * @max_xxx_size: for ucode uses + * @ct_kill_threshold: temperature threshold + * @beacon_time_tsf_bits: number of valid tsf bits for beacon time + * @struct il_sensitivity_ranges: range of sensitivity values + */ +struct il_hw_params { + u8 max_txq_num; + u8 dma_chnl_num; + u16 scd_bc_tbls_size; + u32 tfd_size; + u8 tx_chains_num; + u8 rx_chains_num; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 max_rxq_size; + u16 max_rxq_log; + u32 rx_page_order; + u32 rx_wrt_ptr_reg; + u8 max_stations; + u8 ht40_channel; + u8 max_beacon_itrvl; /* in 1024 ms */ + u32 max_inst_size; + u32 max_data_size; + u32 max_bsm_size; + u32 ct_kill_threshold; /* value in hw-dependent units */ + u16 beacon_time_tsf_bits; + const struct il_sensitivity_ranges *sens; +}; + +/****************************************************************************** + * + * Functions implemented in core module which are forward declared here + * for use by iwl-[4-5].c + * + * NOTE: The implementation of these functions are not hardware specific + * which is why they are in the core module files. 
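The v1 uCode header above is followed directly by the image blobs, in the same order as the size fields. A minimal sketch of walking that layout, not part of the patch:

static void
example_walk_ucode_v1(const struct il_ucode_header *hdr)
{
        const u8 *inst      = hdr->v1.data;     /* runtime code comes first */
        const u8 *data      = inst + le32_to_cpu(hdr->v1.inst_size);
        const u8 *init      = data + le32_to_cpu(hdr->v1.data_size);
        const u8 *init_data = init + le32_to_cpu(hdr->v1.init_size);
        const u8 *boot      = init_data + le32_to_cpu(hdr->v1.init_data_size);

        (void)boot;     /* bootstrap image is the last blob in the file */
}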
+ * + * Naming convention -- + * il_ <-- Is part of iwlwifi + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * il4965_bg_ <-- Called from work queue context + * il4965_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void il4965_update_chain_flags(struct il_priv *il); +extern const u8 il_bcast_addr[ETH_ALEN]; +extern int il_queue_space(const struct il_queue *q); +static inline int +il_queue_used(const struct il_queue *q, int i) +{ + return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr && + i < q->write_ptr) : !(i < + q->read_ptr + && i >= + q-> + write_ptr); +} + +static inline u8 +il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge) +{ + /* + * This is for init calibration result and scan command which + * required buffer > TFD_MAX_PAYLOAD_SIZE, + * the big buffer at end of command array + */ + if (is_huge) + return q->n_win; /* must be power of 2 */ + + /* Otherwise, use normal size buffers */ + return idx & (q->n_win - 1); +} + +struct il_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +#define IL_OPERATION_MODE_AUTO 0 +#define IL_OPERATION_MODE_HT_ONLY 1 +#define IL_OPERATION_MODE_MIXED 2 +#define IL_OPERATION_MODE_20MHZ 3 + +#define IL_TX_CRC_SIZE 4 +#define IL_TX_DELIMITER_SIZE 4 + +#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000 + +/* Sensitivity and chain noise calibration */ +#define INITIALIZATION_VALUE 0xFFFF +#define IL4965_CAL_NUM_BEACONS 20 +#define IL_CAL_NUM_BEACONS 16 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define AUTO_CORR_STEP_OFDM 1 + +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS 3 + +enum il4965_false_alarm_state { + IL_FA_TOO_MANY = 0, + IL_FA_TOO_FEW = 1, + IL_FA_GOOD_RANGE = 2, +}; + +enum il4965_chain_noise_state { + IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IL_CHAIN_NOISE_ACCUMULATE, + IL_CHAIN_NOISE_CALIBRATED, + IL_CHAIN_NOISE_DONE, +}; + +enum il4965_calib_enabled_state { + IL_CALIB_DISABLED = 0, /* must be 0 */ + IL_CALIB_ENABLED = 1, +}; + +/* + * enum il_calib + * defines the order in which results of initial calibrations + * should be sent to the runtime uCode + */ +enum il_calib { + IL_CALIB_MAX, +}; + +/* Opaque calibration results */ +struct il_calib_result { + void *buf; + size_t buf_len; +}; + +enum ucode_type { + UCODE_NONE = 0, + UCODE_INIT, + UCODE_RT +}; + +/* Sensitivity calib data */ +struct il_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u16 barker_corr_th_min; + u16 
barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct il_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; + u8 state; +}; + +#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ +#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +#define IL_TRAFFIC_ENTRIES (256) +#define IL_TRAFFIC_ENTRY_SIZE (64) + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +/* interrupt stats */ +struct isr_stats { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 handlers[IL_CN_MAX]; + u32 tx; + u32 unhandled; +}; + +/* management stats */ +enum il_mgmt_stats { + MANAGEMENT_ASSOC_REQ = 0, + MANAGEMENT_ASSOC_RESP, + MANAGEMENT_REASSOC_REQ, + MANAGEMENT_REASSOC_RESP, + MANAGEMENT_PROBE_REQ, + MANAGEMENT_PROBE_RESP, + MANAGEMENT_BEACON, + MANAGEMENT_ATIM, + MANAGEMENT_DISASSOC, + MANAGEMENT_AUTH, + MANAGEMENT_DEAUTH, + MANAGEMENT_ACTION, + MANAGEMENT_MAX, +}; +/* control stats */ +enum il_ctrl_stats { + CONTROL_BACK_REQ = 0, + CONTROL_BACK, + CONTROL_PSPOLL, + CONTROL_RTS, + CONTROL_CTS, + CONTROL_ACK, + CONTROL_CFEND, + CONTROL_CFENDACK, + CONTROL_MAX, +}; + +struct traffic_stats { +#ifdef CONFIG_IWLEGACY_DEBUGFS + u32 mgmt[MANAGEMENT_MAX]; + u32 ctrl[CONTROL_MAX]; + u32 data_cnt; + u64 data_bytes; +#endif +}; + +/* + * host interrupt timeout value + * used with setting interrupt coalescing timer + * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit + * + * default interrupt coalescing timer is 64 x 32 = 2048 usecs + * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs + */ +#define IL_HOST_INT_TIMEOUT_MAX (0xFF) +#define IL_HOST_INT_TIMEOUT_DEF (0x40) +#define IL_HOST_INT_TIMEOUT_MIN (0x0) +#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) +#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) +#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) + +#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + +/* TX queue watchdog timeouts in mSecs */ +#define IL_DEF_WD_TIMEOUT (2000) +#define IL_LONG_WD_TIMEOUT (10000) +#define IL_MAX_WD_TIMEOUT (120000) + +struct il_force_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + unsigned long reset_duration; + unsigned long last_force_reset_jiffies; +}; + +/* extend beacon time format bit shifting */ +/* + * for _3945 devices + * bits 31:24 - extended + * bits 23:0 - interval + */ +#define IL3945_EXT_BEACON_TIME_POS 24 +/* + * for _4965 devices + * bits 31:22 - extended + * bits 21:0 - interval + */ +#define IL4965_EXT_BEACON_TIME_POS 22 + +struct il_rxon_context { + struct ieee80211_vif *vif; + + const u8 *ac_to_fifo; + const u8 *ac_to_queue; + u8 mcast_queue; + + /* + * We could use the vif to indicate active, but we + * also need it to be active during disabling when + * we already removed the vif for type setting. + */ + bool always_active, is_active; + + bool ht_need_multiple_chains; + + int ctxid; + + u32 interface_modes, exclusive_interface_modes; + u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; + + /* + * We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware. 
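As a sketch of the staging/active pattern this comment describes: changes are prepared in ctx->staging and only copied over the const active copy, via an explicit cast, once the device has accepted them. The helper name is hypothetical and the two fields are declared just below:

static void
example_activate_staging_rxon(struct il_rxon_context *ctx)
{
        /* overwrite the const "active" copy only after a successful commit */
        memcpy((void *)&ctx->active, &ctx->staging, sizeof(ctx->staging));
}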
+ */ + const struct il_rxon_cmd active; + struct il_rxon_cmd staging; + + struct il_rxon_time_cmd timing; + + struct il_qos_info qos_data; + + u8 bcast_sta_id, ap_sta_id; + + u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; + u8 qos_cmd; + u8 wep_key_cmd; + + struct il_wep_key wep_keys[WEP_KEYS_MAX]; + u8 key_mapping_keys; + + __le32 station_flags; + + struct { + bool non_gf_sta_present; + u8 protection; + bool enabled, is_40mhz; + u8 extension_chan_offset; + } ht; +}; + +struct il_power_mgr { + struct il_powertable_cmd sleep_cmd; + struct il_powertable_cmd sleep_cmd_next; + int debug_sleep_level_override; + bool pci_pm; +}; + +struct il_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + struct il_cfg *cfg; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + enum ieee80211_band band; + int alloc_rxb_page; + + void (*handlers[IL_CN_MAX]) (struct il_priv *il, + struct il_rx_buf *rxb); + + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + /* spectrum measurement report caching */ + struct il_spectrum_notification measure_report; + u8 measurement_status; + + /* ucode beacon time */ + u32 ucode_beacon_time; + int missed_beacon_threshold; + + /* track IBSS manager (last beacon) status */ + u32 ibss_manager; + + /* force reset */ + struct il_force_reset force_reset; + + /* we allocate array of il_channel_info for NIC's valid channels. + * Access via channel # using indirect idx array */ + struct il_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* init calibration results */ + struct il_calib_result calib_results[IL_CALIB_MAX]; + + /* Scan related variables */ + unsigned long scan_start; + unsigned long scan_start_tsf; + void *scan_cmd; + enum ieee80211_band scan_band; + struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + spinlock_t reg_lock; /* protect hw register access */ + struct mutex mutex; + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + u32 hw_rev; + u32 hw_wa_rev; + u8 rev_id; + + /* command queue number */ + u8 cmd_queue; + + /* max number of station keys */ + u8 sta_key_max_num; + + /* EEPROM MAC addresses */ + struct mac_address addresses[1]; + + /* uCode images, save to reload in case of failure */ + int fw_idx; /* firmware we're trying to load */ + u32 ucode_ver; /* version of ucode, copy of + il_ucode.ver */ + struct fw_desc ucode_code; /* runtime inst */ + struct fw_desc ucode_data; /* runtime data original */ + struct fw_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_desc ucode_init; /* initialization inst */ + struct fw_desc ucode_init_data; /* initialization data */ + struct fw_desc ucode_boot; /* bootstrap inst */ + enum ucode_type ucode_type; + u8 ucode_write_complete; /* the image write is complete */ + char firmware_name[25]; + + struct il_rxon_context ctx; + + __le16 switch_channel; + + /* 1st responses from initialize and runtime uCode images. + * _4965's initialize alive response contains some calibration data. 
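Earlier in this structure, channel_info is documented as an array indexed indirectly by channel number. A minimal sketch of that lookup through il_get_channel_info(), shown here only for illustration (the wrapper name is hypothetical):

static const struct il_channel_info *
example_lookup_current_band_channel(struct il_priv *il, u16 channel)
{
        /* channel number -> entry in il->channel_info[], via the
         * indirect index array mentioned above */
        return il_get_channel_info(il, il->band, channel);
}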
*/ + struct il_init_alive_resp card_alive_init; + struct il_alive_resp card_alive; + + u16 active_rate; + + u8 start_calib; + struct il_sensitivity_data sensitivity_data; + struct il_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[HD_TBL_SIZE]; + + struct il_ht_config current_ht_config; + + /* Rate scaling data */ + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct il_rx_queue rxq; + struct il_tx_queue *txq; + unsigned long txq_ctx_active_msk; + struct il_dma_ptr kw; /* keep warm address */ + struct il_dma_ptr scd_bc_tbls; + + u32 scd_base_addr; /* scheduler sram base address */ + + unsigned long status; + + /* counts mgmt, ctl, and data packets */ + struct traffic_stats tx_stats; + struct traffic_stats rx_stats; + + /* counts interrupts */ + struct isr_stats isr_stats; + + struct il_power_mgr power_data; + + /* context information */ + u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ + + /* station table variables */ + + /* Note: if lock and sta_lock are needed, lock must be acquired first */ + spinlock_t sta_lock; + int num_stations; + struct il_station_entry stations[IL_STATION_COUNT]; + unsigned long ucode_key_table; + + /* queue refcounts */ +#define IL_MAX_HW_QUEUES 32 + unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)]; + /* for each AC */ + atomic_t queue_stop_count[4]; + + /* Indication if ieee80211_ops->open has been called */ + u8 is_open; + + u8 mac80211_registered; + + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + struct il_eeprom_calib_info *calib_info; + + enum nl80211_iftype iw_mode; + + /* Last Rx'd beacon timestamp */ + u64 timestamp; + + union { +#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) + struct { + void *shared_virt; + dma_addr_t shared_phys; + + struct delayed_work thermal_periodic; + struct delayed_work rfkill_poll; + + struct il3945_notif_stats stats; +#ifdef CONFIG_IWLEGACY_DEBUGFS + struct il3945_notif_stats accum_stats; + struct il3945_notif_stats delta_stats; + struct il3945_notif_stats max_delta; +#endif + + u32 sta_supp_rates; + int last_rx_rssi; /* From Rx packet stats */ + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* + * each calibration channel group in the + * EEPROM has a derived clip setting for + * each rate. 
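The station-table locking rule noted above (il->lock must be taken before il->sta_lock when both are needed) looks like this in practice; a minimal sketch with a hypothetical helper name:

static void
example_touch_station_table(struct il_priv *il)
{
        unsigned long flags;

        spin_lock_irqsave(&il->lock, flags);    /* general lock first */
        spin_lock(&il->sta_lock);               /* then the station table */

        /* ... modify il->stations[] together with other shared state ... */

        spin_unlock(&il->sta_lock);
        spin_unlock_irqrestore(&il->lock, flags);
}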
+ */ + const struct il3945_clip_group clip_groups[5]; + + } _3945; +#endif +#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE) + struct { + struct il_rx_phy_res last_phy_res; + bool last_phy_res_valid; + + struct completion firmware_loading_complete; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + struct il_notif_stats stats; +#ifdef CONFIG_IWLEGACY_DEBUGFS + struct il_notif_stats accum_stats; + struct il_notif_stats delta_stats; + struct il_notif_stats max_delta; +#endif + + } _4965; +#endif + }; + + struct il_hw_params hw_params; + + u32 inta_mask; + + struct workqueue_struct *workqueue; + + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct abort_scan; + + struct il_rxon_context *beacon_ctx; + struct sk_buff *beacon_skb; + + struct work_struct tx_flush; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work scan_check; + + /* TX Power */ + s8 tx_power_user_lmt; + s8 tx_power_device_lmt; + s8 tx_power_next; + +#ifdef CONFIG_IWLEGACY_DEBUG + /* debugging info */ + u32 debug_level; /* per device debugging will override global + il_debug_level if set */ +#endif /* CONFIG_IWLEGACY_DEBUG */ +#ifdef CONFIG_IWLEGACY_DEBUGFS + /* debugfs */ + u16 tx_traffic_idx; + u16 rx_traffic_idx; + u8 *tx_traffic; + u8 *rx_traffic; + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + bool disable_ht40; +#endif /* CONFIG_IWLEGACY_DEBUGFS */ + + struct work_struct txpower_work; + u32 disable_sens_cal; + u32 disable_chain_noise_cal; + u32 disable_tx_power_cal; + struct work_struct run_time_calib_work; + struct timer_list stats_periodic; + struct timer_list watchdog; + bool hw_ready; + + struct led_classdev led; + unsigned long blink_on, blink_off; + bool led_registered; +}; /*il_priv */ + +static inline void +il_txq_ctx_activate(struct il_priv *il, int txq_id) +{ + set_bit(txq_id, &il->txq_ctx_active_msk); +} + +static inline void +il_txq_ctx_deactivate(struct il_priv *il, int txq_id) +{ + clear_bit(txq_id, &il->txq_ctx_active_msk); +} + +static inline struct ieee80211_hdr * +il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx) +{ + if (il->txq[txq_id].txb[idx].skb) + return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb-> + data; + return NULL; +} + +static inline struct il_rxon_context * +il_rxon_ctx_from_vif(struct ieee80211_vif *vif) +{ + struct il_vif_priv *vif_priv = (void *)vif->drv_priv; + + return vif_priv->ctx; +} + +#define for_each_context(il, _ctx) \ + for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++) + +static inline int +il_is_associated(struct il_priv *il) +{ + return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int +il_is_any_associated(struct il_priv *il) +{ + return il_is_associated(il); +} + +static inline int +il_is_associated_ctx(struct il_rxon_context *ctx) +{ + return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int +il_is_channel_valid(const struct il_channel_info *ch_info) +{ + if (ch_info == NULL) + return 0; + return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; +} + +static inline int +il_is_channel_radar(const struct il_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 
1 : 0; +} + +static inline u8 +il_is_channel_a_band(const struct il_channel_info *ch_info) +{ + return ch_info->band == IEEE80211_BAND_5GHZ; +} + +static inline int +il_is_channel_passive(const struct il_channel_info *ch) +{ + return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; +} + +static inline int +il_is_channel_ibss(const struct il_channel_info *ch) +{ + return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; +} + +static inline void +__il_free_pages(struct il_priv *il, struct page *page) +{ + __free_pages(page, il->hw_params.rx_page_order); + il->alloc_rxb_page--; +} + +static inline void +il_free_pages(struct il_priv *il, unsigned long page) +{ + free_pages(page, il->hw_params.rx_page_order); + il->alloc_rxb_page--; +} + +#define IWLWIFI_VERSION "in-tree:" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + +#define IL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +#define TIME_UNIT 1024 + +#define IL_SKU_G 0x1 +#define IL_SKU_A 0x2 +#define IL_SKU_N 0x8 + +#define IL_CMD(x) case x: return #x + +/* Size of one Rx buffer in host DRAM */ +#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */ +#define IL_RX_BUF_SIZE_4K (4 * 1024) +#define IL_RX_BUF_SIZE_8K (8 * 1024) + +struct il_hcmd_ops { + int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx); + int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx); + void (*set_rxon_chain) (struct il_priv *il, + struct il_rxon_context *ctx); +}; + +struct il_hcmd_utils_ops { + u16(*get_hcmd_size) (u8 cmd_id, u16 len); + u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data); + int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif); + void (*post_scan) (struct il_priv *il); +}; + +struct il_apm_ops { + int (*init) (struct il_priv *il); + void (*config) (struct il_priv *il); +}; + +#ifdef CONFIG_IWLEGACY_DEBUGFS +struct il_debugfs_ops { + ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t(*general_stats_read) (struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos); +}; +#endif + +struct il_temp_ops { + void (*temperature) (struct il_priv *il); +}; + +struct il_lib_ops { + /* set hw dependent parameters */ + int (*set_hw_params) (struct il_priv *il); + /* Handling TX */ + void (*txq_update_byte_cnt_tbl) (struct il_priv *il, + struct il_tx_queue *txq, + u16 byte_cnt); + int (*txq_attach_buf_to_tfd) (struct il_priv *il, + struct il_tx_queue *txq, dma_addr_t addr, + u16 len, u8 reset, u8 pad); + void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq); + int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq); + /* setup Rx handler */ + void (*handler_setup) (struct il_priv *il); + /* alive notification after init uCode load */ + void (*init_alive_start) (struct il_priv *il); + /* check validity of rtc data address */ + int (*is_valid_rtc_data_addr) (u32 addr); + /* 1st ucode load */ + int (*load_ucode) (struct il_priv *il); + + void (*dump_nic_error_log) (struct il_priv *il); + int (*dump_fh) (struct il_priv *il, char **buf, bool display); + int (*set_channel_switch) (struct il_priv *il, + struct ieee80211_channel_switch *ch_switch); + /* power management */ + struct il_apm_ops apm_ops; + + /* power */ + int (*send_tx_power) 
(struct il_priv *il); + void (*update_chain_flags) (struct il_priv *il); + + /* eeprom operations */ + struct il_eeprom_ops eeprom_ops; + + /* temperature */ + struct il_temp_ops temp_ops; + +#ifdef CONFIG_IWLEGACY_DEBUGFS + struct il_debugfs_ops debugfs_ops; +#endif + +}; + +struct il_led_ops { + int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd); +}; + +struct il_legacy_ops { + void (*post_associate) (struct il_priv *il); + void (*config_ap) (struct il_priv *il); + /* station management */ + int (*update_bcast_stations) (struct il_priv *il); + int (*manage_ibss_station) (struct il_priv *il, + struct ieee80211_vif *vif, bool add); +}; + +struct il_ops { + const struct il_lib_ops *lib; + const struct il_hcmd_ops *hcmd; + const struct il_hcmd_utils_ops *utils; + const struct il_led_ops *led; + const struct il_nic_ops *nic; + const struct il_legacy_ops *legacy; + const struct ieee80211_ops *ieee80211_ops; +}; + +struct il_mod_params { + int sw_crypto; /* def: 0 = using hardware encryption */ + int disable_hw_scan; /* def: 0 = use h/w scan */ + int num_of_queues; /* def: HW dependent */ + int disable_11n; /* def: 0 = 11n capabilities enabled */ + int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ + int antenna; /* def: 0 = both antennas (use diversity) */ + int restart_fw; /* def: 1 = restart firmware */ +}; + +/* + * @led_compensation: compensate on the led on/off time per HW according + * to the deviation to achieve the desired led frequency. + * The detail algorithm is described in common.c + * @chain_noise_num_beacons: number of beacons used to compute chain noise + * @wd_timeout: TX queues watchdog timeout + * @temperature_kelvin: temperature report by uCode in kelvin + * @ucode_tracing: support ucode continuous tracing + * @sensitivity_calib_by_driver: driver has the capability to perform + * sensitivity calibration operation + * @chain_noise_calib_by_driver: driver has the capability to perform + * chain noise calibration operation + */ +struct il_base_params { + int eeprom_size; + int num_of_queues; /* def: HW dependent */ + int num_of_ampdu_queues; /* def: HW dependent */ + /* for il_apm_init() */ + u32 pll_cfg_val; + bool set_l0s; + bool use_bsm; + + u16 led_compensation; + int chain_noise_num_beacons; + unsigned int wd_timeout; + bool temperature_kelvin; + const bool ucode_tracing; + const bool sensitivity_calib_by_driver; + const bool chain_noise_calib_by_driver; +}; + +#define IL_LED_SOLID 11 +#define IL_DEF_LED_INTRVL cpu_to_le32(1000) + +#define IL_LED_ACTIVITY (0<<1) +#define IL_LED_LINK (1<<1) + +/* + * LED mode + * IL_LED_DEFAULT: use device default + * IL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IL_LED_BLINK: adjust led blink rate based on blink table + */ +enum il_led_mode { + IL_LED_DEFAULT, + IL_LED_RF_STATE, + IL_LED_BLINK, +}; + +void il_leds_init(struct il_priv *il); +void il_leds_exit(struct il_priv *il); + +/** + * struct il_cfg + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre<api>.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @scan_antennas: available antenna for scan operation + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * + * We enable the driver to be backward compatible wrt API version. 
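As an aside on @fw_name_pre above, the firmware file name is built as fw_name_pre<api>.ucode. A minimal sketch, not part of the patch (the prefix shown in the comment is only an example value):

static void
example_build_fw_name(char *buf, size_t len,
                      const char *fw_name_pre, unsigned int api_ver)
{
        /* e.g. fw_name_pre "iwlwifi-4965-" and api 2 -> "iwlwifi-4965-2.ucode" */
        snprintf(buf, len, "%s%u.ucode", fw_name_pre, api_ver);
}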
The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. The firmware's API version will be + * stored in @il_priv, enabling the driver to make runtime changes based + * on firmware version used. + * + * For example, + * if (IL_UCODE_API(il->ucode_ver) >= 2) { + * Driver interacts with Firmware API version >= 2. + * } else { + * Driver interacts with Firmware API version 1. + * } + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. That is, through utilizing the + * il_hcmd_utils_ops etc. we accommodate different command structures + * and flows between hardware versions as well as their API + * versions. + * + */ +struct il_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_min; + u8 valid_tx_ant; + u8 valid_rx_ant; + unsigned int sku; + u16 eeprom_ver; + u16 eeprom_calib_ver; + const struct il_ops *ops; + /* module based parameters which can be set from modprobe cmd */ + const struct il_mod_params *mod_params; + /* params not likely to change within a device family */ + struct il_base_params *base_params; + /* params likely to change within a device family */ + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + enum il_led_mode led_mode; +}; + +/*************************** + * L i b * + ***************************/ + +struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg); +int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +int il_mac_tx_last_beacon(struct ieee80211_hw *hw); + +void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx, + int hw_decrypt); +int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx); +int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx); +int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch, + struct il_rxon_context *ctx); +void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx, + enum ieee80211_band band, struct ieee80211_vif *vif); +u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band); +void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf); +bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap); +void il_connection_init_rx_config(struct il_priv *il, + struct il_rxon_context *ctx); +void il_set_rate(struct il_priv *il); +int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, + u32 decrypt_res, struct ieee80211_rx_status *stats); +void il_irq_handle_error(struct il_priv *il); +int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void il_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p); +int il_alloc_txq_mem(struct il_priv *il); +void il_txq_mem(struct il_priv *il); + +#ifdef CONFIG_IWLEGACY_DEBUGFS +int il_alloc_traffic_mem(struct il_priv *il); +void il_free_traffic_mem(struct il_priv *il); +void il_reset_traffic_log(struct il_priv *il); +void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length, + struct ieee80211_hdr *header); +void il_dbg_log_rx_data_frame(struct 
il_priv *il, u16 length, + struct ieee80211_hdr *header); +const char *il_get_mgmt_string(int cmd); +const char *il_get_ctrl_string(int cmd); +void il_clear_traffic_stats(struct il_priv *il); +void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len); +#else +static inline int +il_alloc_traffic_mem(struct il_priv *il) +{ + return 0; +} + +static inline void +il_free_traffic_mem(struct il_priv *il) +{ +} + +static inline void +il_reset_traffic_log(struct il_priv *il) +{ +} + +static inline void +il_dbg_log_tx_data_frame(struct il_priv *il, u16 length, + struct ieee80211_hdr *header) +{ +} + +static inline void +il_dbg_log_rx_data_frame(struct il_priv *il, u16 length, + struct ieee80211_hdr *header) +{ +} + +static inline void +il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len) +{ +} +#endif +/***************************************************** + * RX handlers. + * **************************************************/ +void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb); +void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb); +void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb); + +/***************************************************** +* RX +******************************************************/ +void il_cmd_queue_unmap(struct il_priv *il); +void il_cmd_queue_free(struct il_priv *il); +int il_rx_queue_alloc(struct il_priv *il); +void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q); +int il_rx_queue_space(const struct il_rx_queue *q); +void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb); +/* Handlers */ +void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb); +void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt); +void il_chswitch_done(struct il_priv *il, bool is_success); +void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb); + +/* TX helpers */ + +/***************************************************** +* TX +******************************************************/ +void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq); +int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num, + u32 txq_id); +void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, + int slots_num, u32 txq_id); +void il_tx_queue_unmap(struct il_priv *il, int txq_id); +void il_tx_queue_free(struct il_priv *il, int txq_id); +void il_setup_watchdog(struct il_priv *il); +/***************************************************** + * TX power + ****************************************************/ +int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force); + +/******************************************************************************* + * Rate + ******************************************************************************/ + +u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx); + +/******************************************************************************* + * Scanning + ******************************************************************************/ +void il_init_scan_params(struct il_priv *il); +int il_scan_cancel(struct il_priv *il); +int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms); +void il_force_scan_end(struct il_priv *il); +int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +void il_internal_short_hw_scan(struct il_priv *il); +int il_force_reset(struct il_priv *il, bool external); +u16 il_fill_probe_req(struct 
il_priv *il, struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ie, int ie_len, int left); +void il_setup_rx_scan_handlers(struct il_priv *il); +u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, + u8 n_probes); +u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, + struct ieee80211_vif *vif); +void il_setup_scan_deferred_work(struct il_priv *il); +void il_cancel_scan_deferred_work(struct il_priv *il); + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + +#define IL_SCAN_CHECK_WATCHDOG (HZ * 7) + +/***************************************************** + * S e n d i n g H o s t C o m m a n d s * + *****************************************************/ + +const char *il_get_cmd_string(u8 cmd); +int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd); +int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd); +int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, + const void *data); +int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, + void (*callback) (struct il_priv *il, + struct il_device_cmd *cmd, + struct il_rx_pkt *pkt)); + +int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd); + +/***************************************************** + * PCI * + *****************************************************/ + +static inline u16 +il_pcie_link_ctl(struct il_priv *il) +{ + int pos; + u16 pci_lnk_ctl; + pos = pci_pcie_cap(il->pci_dev); + pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; +} + +void il_bg_watchdog(unsigned long data); +u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval); +__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, + u32 beacon_interval); + +#ifdef CONFIG_PM +int il_pci_suspend(struct device *device); +int il_pci_resume(struct device *device); +extern const struct dev_pm_ops il_pm_ops; + +#define IL_LEGACY_PM_OPS (&il_pm_ops) + +#else /* !CONFIG_PM */ + +#define IL_LEGACY_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +/***************************************************** +* Error Handling Debugging +******************************************************/ +void il4965_dump_nic_error_log(struct il_priv *il); +#ifdef CONFIG_IWLEGACY_DEBUG +void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx); +#else +static inline void +il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx) +{ +} +#endif + +void il_clear_isr_stats(struct il_priv *il); + +/***************************************************** +* GEOS +******************************************************/ +int il_init_geos(struct il_priv *il); +void il_free_geos(struct il_priv *il); + +/*************** DRIVER STATUS FUNCTIONS *****/ + +#define S_HCMD_ACTIVE 0 /* host command in progress */ +/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */ +#define S_INT_ENABLED 2 +#define S_RF_KILL_HW 3 +#define S_CT_KILL 4 +#define S_INIT 5 +#define S_ALIVE 6 +#define S_READY 7 +#define S_TEMPERATURE 8 +#define S_GEO_CONFIGURED 9 +#define 
S_EXIT_PENDING 10 +#define S_STATS 12 +#define S_SCANNING 13 +#define S_SCAN_ABORTING 14 +#define S_SCAN_HW 15 +#define S_POWER_PMI 16 +#define S_FW_ERROR 17 +#define S_CHANNEL_SWITCH_PENDING 18 + +static inline int +il_is_ready(struct il_priv *il) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(S_READY, &il->status) && + test_bit(S_GEO_CONFIGURED, &il->status) && + !test_bit(S_EXIT_PENDING, &il->status); +} + +static inline int +il_is_alive(struct il_priv *il) +{ + return test_bit(S_ALIVE, &il->status); +} + +static inline int +il_is_init(struct il_priv *il) +{ + return test_bit(S_INIT, &il->status); +} + +static inline int +il_is_rfkill_hw(struct il_priv *il) +{ + return test_bit(S_RF_KILL_HW, &il->status); +} + +static inline int +il_is_rfkill(struct il_priv *il) +{ + return il_is_rfkill_hw(il); +} + +static inline int +il_is_ctkill(struct il_priv *il) +{ + return test_bit(S_CT_KILL, &il->status); +} + +static inline int +il_is_ready_rf(struct il_priv *il) +{ + + if (il_is_rfkill(il)) + return 0; + + return il_is_ready(il); +} + +extern void il_send_bt_config(struct il_priv *il); +extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear); +void il_apm_stop(struct il_priv *il); +int il_apm_init(struct il_priv *il); + +int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx); +static inline int +il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx) +{ + return il->cfg->ops->hcmd->rxon_assoc(il, ctx); +} + +static inline int +il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx) +{ + return il->cfg->ops->hcmd->commit_rxon(il, ctx); +} + +static inline const struct ieee80211_supported_band * +il_get_hw_mode(struct il_priv *il, enum ieee80211_band band) +{ + return il->hw->wiphy->bands[band]; +} + +/* mac80211 handlers */ +int il_mac_config(struct ieee80211_hw *hw, u32 changed); +void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, u32 changes); +void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags); + +irqreturn_t il_isr(int irq, void *data); + +extern void il_set_bit(struct il_priv *p, u32 r, u32 m); +extern void il_clear_bit(struct il_priv *p, u32 r, u32 m); +extern int _il_grab_nic_access(struct il_priv *il); +extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout); +extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout); +extern u32 il_rd_prph(struct il_priv *il, u32 reg); +extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val); +extern u32 il_read_targ_mem(struct il_priv *il, u32 addr); +extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val); + +static inline void +_il_write8(struct il_priv *il, u32 ofs, u8 val) +{ + iowrite8(val, il->hw_base + ofs); +} +#define il_write8(il, ofs, val) _il_write8(il, ofs, val) + +static inline void +_il_wr(struct il_priv *il, u32 ofs, u32 val) +{ + iowrite32(val, il->hw_base + ofs); +} + +static inline u32 +_il_rd(struct il_priv *il, u32 ofs) +{ + return ioread32(il->hw_base + ofs); +} + +static inline void +_il_clear_bit(struct il_priv *il, u32 reg, u32 mask) +{ + _il_wr(il, reg, _il_rd(il, reg) & ~mask); +} + +static inline void +_il_set_bit(struct il_priv *il, u32 reg, u32 mask) +{ + _il_wr(il, reg, _il_rd(il, reg) | mask); +} + +static inline void 
+_il_release_nic_access(struct il_priv *il) +{ + _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} + +static inline u32 +il_rd(struct il_priv *il, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + _il_grab_nic_access(il); + value = _il_rd(il, reg); + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, reg_flags); + return value; +} + +static inline void +il_wr(struct il_priv *il, u32 reg, u32 value) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + if (!_il_grab_nic_access(il)) { + _il_wr(il, reg, value); + _il_release_nic_access(il); + } + spin_unlock_irqrestore(&il->reg_lock, reg_flags); +} + +static inline u32 +_il_rd_prph(struct il_priv *il, u32 reg) +{ + _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + rmb(); + return _il_rd(il, HBUS_TARG_PRPH_RDAT); +} + +static inline void +_il_wr_prph(struct il_priv *il, u32 addr, u32 val) +{ + _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24))); + wmb(); + _il_wr(il, HBUS_TARG_PRPH_WDAT, val); +} + +static inline void +il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + _il_grab_nic_access(il); + _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask)); + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, reg_flags); +} + +static inline void +il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + _il_grab_nic_access(il); + _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits)); + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, reg_flags); +} + +static inline void +il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&il->reg_lock, reg_flags); + _il_grab_nic_access(il); + val = _il_rd_prph(il, reg); + _il_wr_prph(il, reg, (val & ~mask)); + _il_release_nic_access(il); + spin_unlock_irqrestore(&il->reg_lock, reg_flags); +} + +#define HW_KEY_DYNAMIC 0 +#define HW_KEY_DEFAULT 1 + +#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ +#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ +#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of + being activated */ +#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211; + (this is for the IBSS BSSID stations) */ +#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */ + +void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx); +void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx); +void il_dealloc_bcast_stations(struct il_priv *il); +int il_get_free_ucode_key_idx(struct il_priv *il); +int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags); +int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r); +int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr); +int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta); + +int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx, + struct il_link_quality_cmd *lq, u8 flags, bool init); + +/** + * 
il_clear_driver_stations - clear knowledge of all stations from driver + * @il: iwl il struct + * + * This is called during il_down() to make sure that in the case + * we're coming there from a hardware restart mac80211 will be + * able to reconfigure stations -- if we're getting there in the + * normal down flow then the stations will already be cleared. + */ +static inline void +il_clear_driver_stations(struct il_priv *il) +{ + unsigned long flags; + struct il_rxon_context *ctx = &il->ctx; + + spin_lock_irqsave(&il->sta_lock, flags); + memset(il->stations, 0, sizeof(il->stations)); + il->num_stations = 0; + + il->ucode_key_table = 0; + + /* + * Remove all key information that is not stored as part + * of station information since mac80211 may not have had + * a chance to remove all the keys. When device is + * reconfigured by mac80211 after an error all keys will + * be reconfigured. + */ + memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); + ctx->key_mapping_keys = 0; + + spin_unlock_irqrestore(&il->sta_lock, flags); +} + +static inline int +il_sta_id(struct ieee80211_sta *sta) +{ + if (WARN_ON(!sta)) + return IL_INVALID_STATION; + + return ((struct il_station_priv_common *)sta->drv_priv)->sta_id; +} + +/** + * il_sta_id_or_broadcast - return sta_id or broadcast sta + * @il: iwl il + * @context: the current context + * @sta: mac80211 station + * + * In certain circumstances mac80211 passes a station pointer + * that may be %NULL, for example during TX or key setup. In + * that case, we need to use the broadcast station, so this + * inline wraps that pattern. + */ +static inline int +il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context, + struct ieee80211_sta *sta) +{ + int sta_id; + + if (!sta) + return context->bcast_sta_id; + + sta_id = il_sta_id(sta); + + /* + * mac80211 should not be passing a partially + * initialised station! + */ + WARN_ON(sta_id == IL_INVALID_STATION); + + return sta_id; +} + +/** + * il_queue_inc_wrap - increment queue idx, wrap back to beginning + * @idx -- current idx + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int +il_queue_inc_wrap(int idx, int n_bd) +{ + return ++idx & (n_bd - 1); +} + +/** + * il_queue_dec_wrap - decrement queue idx, wrap back to end + * @idx -- current idx + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int +il_queue_dec_wrap(int idx, int n_bd) +{ + return --idx & (n_bd - 1); +} + +/* TODO: Move fw_desc functions to iwl-pci.ko */ +static inline void +il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr, + desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static inline int +il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) +{ + if (!desc->len) { + desc->v_addr = NULL; + return -EINVAL; + } + + desc->v_addr = + dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr, + GFP_KERNEL); + return (desc->v_addr != NULL) ? 
0 : -ENOMEM; +} + +/* + * we have 8 bits used like this: + * + * 7 6 5 4 3 2 1 0 + * | | | | | | | | + * | | | | | | +-+-------- AC queue (0-3) + * | | | | | | + * | +-+-+-+-+------------ HW queue ID + * | + * +---------------------- unused + */ +static inline void +il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq) +{ + BUG_ON(ac > 3); /* only have 2 bits */ + BUG_ON(hwq > 31); /* only use 5 bits */ + + txq->swq_id = (hwq << 2) | ac; +} + +static inline void +il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (test_and_clear_bit(hwq, il->queue_stopped)) + if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(il->hw, ac); +} + +static inline void +il_stop_queue(struct il_priv *il, struct il_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (!test_and_set_bit(hwq, il->queue_stopped)) + if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(il->hw, ac); +} + +#ifdef ieee80211_stop_queue +#undef ieee80211_stop_queue +#endif + +#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue + +#ifdef ieee80211_wake_queue +#undef ieee80211_wake_queue +#endif + +#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue + +static inline void +il_disable_interrupts(struct il_priv *il) +{ + clear_bit(S_INT_ENABLED, &il->status); + + /* disable interrupts from uCode/NIC to host */ + _il_wr(il, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + _il_wr(il, CSR_INT, 0xffffffff); + _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff); +} + +static inline void +il_enable_rfkill_int(struct il_priv *il) +{ + _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); +} + +static inline void +il_enable_interrupts(struct il_priv *il) +{ + set_bit(S_INT_ENABLED, &il->status); + _il_wr(il, CSR_INT_MASK, il->inta_mask); +} + +/** + * il_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @il -- pointer to il_priv data structure + * @tsf_bits -- number of bits needed to shift for masking + */ +static inline u32 +il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * il_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @il -- pointer to il_priv data structure + * @tsf_bits -- number of bits needed to shift for masking + */ +static inline u32 +il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + +/** + * struct il_rb_status - receive buffer status host memory mapped FH registers + * + * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed + * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed + * @finished_rb_num [0:11] - Indicates the idx of the current RB + * in which the last frame was written to + * @finished_fr_num [0:11] - Indicates the idx of the RX Frame + * which was transferred + */ +struct il_rb_status { + __le16 closed_rb_num; + __le16 closed_fr_num; + __le16 finished_rb_num; + __le16 finished_fr_nam; + __le32 __unused; /* 3945 only */ +} __packed; + +#define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_BC_DUP (64) +#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define IL_TX_DMA_MASK DMA_BIT_MASK(36) +#define IL_NUM_OF_TBS 20 + +static inline u8 +il_get_dma_hi_addr(dma_addr_t addr) +{ + return (sizeof(addr) > sizeof(u32) ?
(addr >> 16) >> 16 : 0) & 0xF; +} + +/** + * struct il_tfd_tb - transmit buffer descriptor within a transmit frame descriptor + * + * This structure contains the dma address and length of one transmit buffer + * + * @lo: low [31:0] portion of the dma address of TX buffer every even is + * unaligned on 16 bit boundary + * @hi_n_len: 0-3 [35:32] portion of dma + * 4-15 length of the tx buffer + */ +struct il_tfd_tb { + __le32 lo; + __le16 hi_n_len; +} __packed; + +/** + * struct il_tfd + * + * Transmit Frame Descriptor (TFD) + * + * @ __reserved1[3] reserved + * @ num_tbs 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @ tbs[20] transmit frame buffer descriptors + * @ __pad padding + * + * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. + * Both driver and device share these circular buffers, each of which must be + * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes + * + * Driver must indicate the physical address of the base of each + * circular buffer via the FH49_MEM_CBBC_QUEUE registers. + * + * Each TFD contains pointer/size information for up to 20 data buffers + * in host DRAM. These buffers collectively contain the (one) frame described + * by the TFD. Each buffer must be a single contiguous block of memory within + * itself, but buffers may be scattered in host DRAM. Each buffer has max size + * of (4K - 4). The device concatenates all of a TFD's buffers into a single + * Tx frame, up to 8 KBytes in size. + * + * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. + */ +struct il_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct il_tfd_tb tbs[IL_NUM_OF_TBS]; + __le32 __pad; +} __packed; +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +/* PCI register values */ +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +struct il_rate_info { + u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */ + u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +struct il3945_rate_info { + u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */ + u8 ieee; /* MAC header: RATE_6M_IEEE, etc.
*/ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ + u8 table_rs_idx; /* idx in rate scale table cmd */ + u8 prev_table_rs; /* prev in rate table cmd */ +}; + +/* + * These serve as idxes into + * struct il_rate_info il_rates[RATE_COUNT]; + */ +enum { + RATE_1M_IDX = 0, + RATE_2M_IDX, + RATE_5M_IDX, + RATE_11M_IDX, + RATE_6M_IDX, + RATE_9M_IDX, + RATE_12M_IDX, + RATE_18M_IDX, + RATE_24M_IDX, + RATE_36M_IDX, + RATE_48M_IDX, + RATE_54M_IDX, + RATE_60M_IDX, + RATE_COUNT, + RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */ + RATE_COUNT_3945 = RATE_COUNT - 1, + RATE_INVM_IDX = RATE_COUNT, + RATE_INVALID = RATE_COUNT, +}; + +enum { + RATE_6M_IDX_TBL = 0, + RATE_9M_IDX_TBL, + RATE_12M_IDX_TBL, + RATE_18M_IDX_TBL, + RATE_24M_IDX_TBL, + RATE_36M_IDX_TBL, + RATE_48M_IDX_TBL, + RATE_54M_IDX_TBL, + RATE_1M_IDX_TBL, + RATE_2M_IDX_TBL, + RATE_5M_IDX_TBL, + RATE_11M_IDX_TBL, + RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1, +}; + +enum { + IL_FIRST_OFDM_RATE = RATE_6M_IDX, + IL39_LAST_OFDM_RATE = RATE_54M_IDX, + IL_LAST_OFDM_RATE = RATE_60M_IDX, + IL_FIRST_CCK_RATE = RATE_1M_IDX, + IL_LAST_CCK_RATE = RATE_11M_IDX, +}; + +/* #define vs. enum to keep from defaulting to 'large integer' */ +#define RATE_6M_MASK (1 << RATE_6M_IDX) +#define RATE_9M_MASK (1 << RATE_9M_IDX) +#define RATE_12M_MASK (1 << RATE_12M_IDX) +#define RATE_18M_MASK (1 << RATE_18M_IDX) +#define RATE_24M_MASK (1 << RATE_24M_IDX) +#define RATE_36M_MASK (1 << RATE_36M_IDX) +#define RATE_48M_MASK (1 << RATE_48M_IDX) +#define RATE_54M_MASK (1 << RATE_54M_IDX) +#define RATE_60M_MASK (1 << RATE_60M_IDX) +#define RATE_1M_MASK (1 << RATE_1M_IDX) +#define RATE_2M_MASK (1 << RATE_2M_IDX) +#define RATE_5M_MASK (1 << RATE_5M_IDX) +#define RATE_11M_MASK (1 << RATE_11M_IDX) + +/* uCode API values for legacy bit rates, both OFDM and CCK */ +enum { + RATE_6M_PLCP = 13, + RATE_9M_PLCP = 15, + RATE_12M_PLCP = 5, + RATE_18M_PLCP = 7, + RATE_24M_PLCP = 9, + RATE_36M_PLCP = 11, + RATE_48M_PLCP = 1, + RATE_54M_PLCP = 3, + RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */ + RATE_1M_PLCP = 10, + RATE_2M_PLCP = 20, + RATE_5M_PLCP = 55, + RATE_11M_PLCP = 110, + /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */ +}; + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + RATE_SISO_6M_PLCP = 0, + RATE_SISO_12M_PLCP = 1, + RATE_SISO_18M_PLCP = 2, + RATE_SISO_24M_PLCP = 3, + RATE_SISO_36M_PLCP = 4, + RATE_SISO_48M_PLCP = 5, + RATE_SISO_54M_PLCP = 6, + RATE_SISO_60M_PLCP = 7, + RATE_MIMO2_6M_PLCP = 0x8, + RATE_MIMO2_12M_PLCP = 0x9, + RATE_MIMO2_18M_PLCP = 0xa, + RATE_MIMO2_24M_PLCP = 0xb, + RATE_MIMO2_36M_PLCP = 0xc, + RATE_MIMO2_48M_PLCP = 0xd, + RATE_MIMO2_54M_PLCP = 0xe, + RATE_MIMO2_60M_PLCP = 0xf, + RATE_SISO_INVM_PLCP, + RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP, +}; + +/* MAC header values for bit rates */ +enum { + RATE_6M_IEEE = 12, + RATE_9M_IEEE = 18, + RATE_12M_IEEE = 24, + RATE_18M_IEEE = 36, + RATE_24M_IEEE = 48, + RATE_36M_IEEE = 72, + RATE_48M_IEEE = 96, + RATE_54M_IEEE = 108, + RATE_60M_IEEE = 120, + RATE_1M_IEEE = 2, + RATE_2M_IEEE = 4, + RATE_5M_IEEE = 11, + RATE_11M_IEEE = 22, +}; + +#define IL_CCK_BASIC_RATES_MASK \ + (RATE_1M_MASK | \ + RATE_2M_MASK) + +#define IL_CCK_RATES_MASK \ + (IL_CCK_BASIC_RATES_MASK | \ + RATE_5M_MASK | \ + RATE_11M_MASK) + +#define IL_OFDM_BASIC_RATES_MASK \ 
+ (RATE_6M_MASK | \ + RATE_12M_MASK | \ + RATE_24M_MASK) + +#define IL_OFDM_RATES_MASK \ + (IL_OFDM_BASIC_RATES_MASK | \ + RATE_9M_MASK | \ + RATE_18M_MASK | \ + RATE_36M_MASK | \ + RATE_48M_MASK | \ + RATE_54M_MASK) + +#define IL_BASIC_RATES_MASK \ + (IL_OFDM_BASIC_RATES_MASK | \ + IL_CCK_BASIC_RATES_MASK) + +#define RATES_MASK ((1 << RATE_COUNT) - 1) +#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1) + +#define IL_INVALID_VALUE -1 + +#define IL_MIN_RSSI_VAL -100 +#define IL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IL_LEGACY_FAILURE_LIMIT 160 +#define IL_LEGACY_SUCCESS_LIMIT 480 +#define IL_LEGACY_TBL_COUNT 160 + +#define IL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IL_NONE_LEGACY_TBL_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IL_RS_GOOD_RATIO 12800 /* 100% */ +#define RATE_SCALE_SWITCH 10880 /* 85% */ +#define RATE_HIGH_TH 10880 /* 85% */ +#define RATE_INCREASE_TH 6400 /* 50% */ +#define RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IL_LEGACY_SWITCH_ANTENNA1 0 +#define IL_LEGACY_SWITCH_ANTENNA2 1 +#define IL_LEGACY_SWITCH_SISO 2 +#define IL_LEGACY_SWITCH_MIMO2_AB 3 +#define IL_LEGACY_SWITCH_MIMO2_AC 4 +#define IL_LEGACY_SWITCH_MIMO2_BC 5 + +/* possible actions when in siso mode */ +#define IL_SISO_SWITCH_ANTENNA1 0 +#define IL_SISO_SWITCH_ANTENNA2 1 +#define IL_SISO_SWITCH_MIMO2_AB 2 +#define IL_SISO_SWITCH_MIMO2_AC 3 +#define IL_SISO_SWITCH_MIMO2_BC 4 +#define IL_SISO_SWITCH_GI 5 + +/* possible actions when in mimo mode */ +#define IL_MIMO2_SWITCH_ANTENNA1 0 +#define IL_MIMO2_SWITCH_ANTENNA2 1 +#define IL_MIMO2_SWITCH_SISO_A 2 +#define IL_MIMO2_SWITCH_SISO_B 3 +#define IL_MIMO2_SWITCH_SISO_C 4 +#define IL_MIMO2_SWITCH_GI 5 + +#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI + +#define IL_ACTION_LIMIT 3 /* # possible actions */ + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IL_AGG_TPT_THREHOLD 0 +#define IL_AGG_LOAD_THRESHOLD 10 +#define IL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) + +extern const struct il_rate_info il_rates[RATE_COUNT]; + +enum il_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MAX, +}; + +#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo(tbl) (is_mimo2(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_C BIT(2) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_AB | ANT_C) + +#define IL_MAX_MCS_DISPLAY_SIZE 12 + +struct il_rate_mcs_info { + char mbps[IL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct il_rate_scale_data -- tx success history for one rate + */ +struct il_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct il_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct il_lq_sta, + * one for "active", and one for "search". + */ +struct il_scale_tbl_info { + enum il_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 is_dup; /* 1 = duplicated data streams */ + u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximum number of tables we can search */ + s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */ +}; + +struct il_traffic_load { + unsigned long time_stamp; /* age of the oldest stats */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that have + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct il_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. + */ +struct il_lq_sta { + u8 active_tbl; /* idx of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + u8 is_dup; + enum ieee80211_band band; + + /* The following are bitmaps of rates; RATE_6M_MASK, etc.
*/ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct il_link_quality_cmd lq; + struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct il_traffic_load load[TID_MAX_LOAD_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct il_priv *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; +}; + +/* + * il_station_priv: Driver's private station information + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by the driver. This structure is placed in that + * space. + * + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct il_station_priv { + struct il_station_priv_common common; + struct il_lq_sta lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; +}; + +static inline u8 +il4965_num_of_ant(u8 m) +{ + return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C); +} + +static inline u8 +il4965_first_antenna(u8 mask) +{ + if (mask & ANT_A) + return ANT_A; + if (mask & ANT_B) + return ANT_B; + return ANT_C; +} + +/** + * il3945_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific throughput table used is based on the type of network + * the device is associated with, including A, B, G, and G w/ TGG protection + */ +extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/* Initialize station's rate scaling information after adding station */ +extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, + u8 sta_id); +extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, + u8 sta_id); + +/** + * il_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand-alone module. The driver can call + * il_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw. + * + */ +extern int il4965_rate_control_register(void); +extern int il3945_rate_control_register(void); + +/** + * il_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +extern void il4965_rate_control_unregister(void); +extern void il3945_rate_control_unregister(void); + +extern int il_power_update_mode(struct il_priv *il, bool force); +extern void il_power_initialize(struct il_priv *il); + +extern u32 il_debug_level; + +#ifdef CONFIG_IWLEGACY_DEBUG +/* + * il_get_debug_level: Return active debug level for device + * + * Using sysfs it is possible to set a per-device debug level. This debug + * level will be used if set, otherwise the global debug level which can be + * set via module parameter is used.
+ */ +static inline u32 +il_get_debug_level(struct il_priv *il) +{ + if (il->debug_level) + return il->debug_level; + else + return il_debug_level; +} +#else +static inline u32 +il_get_debug_level(struct il_priv *il) +{ + return il_debug_level; +} +#endif + +#define il_print_hex_error(il, p, len) \ +do { \ + print_hex_dump(KERN_ERR, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#ifdef CONFIG_IWLEGACY_DEBUG +#define IL_DBG(level, fmt, args...) \ +do { \ + if (il_get_debug_level(il) & level) \ + dev_printk(KERN_ERR, &il->hw->wiphy->dev, \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define il_print_hex_dump(il, level, p, len) \ +do { \ + if (il_get_debug_level(il) & level) \ + print_hex_dump(KERN_DEBUG, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#else +#define IL_DBG(level, fmt, args...) +static inline void +il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len) +{ +} +#endif /* CONFIG_IWLEGACY_DEBUG */ + +#ifdef CONFIG_IWLEGACY_DEBUGFS +int il_dbgfs_register(struct il_priv *il, const char *name); +void il_dbgfs_unregister(struct il_priv *il); +#else +static inline int +il_dbgfs_register(struct il_priv *il, const char *name) +{ + return 0; +} + +static inline void +il_dbgfs_unregister(struct il_priv *il) +{ +} +#endif /* CONFIG_IWLEGACY_DEBUGFS */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of + * + * #define IL_DL_xxxx VALUE + * + * where xxxx should be the name of the classification (for example, WEP). + * + * You then need to either add a IL_xxxx_DEBUG() macro definition for your + * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * The active debug levels can be accessed via files + * + * /sys/module/iwl4965/parameters/debug + * /sys/module/iwl3945/parameters/debug + * /sys/class/net/wlan0/device/debug_level + * + * when CONFIG_IWLEGACY_DEBUG=y. + */ + +/* 0x0000000F - 0x00000001 */ +#define IL_DL_INFO (1 << 0) +#define IL_DL_MAC80211 (1 << 1) +#define IL_DL_HCMD (1 << 2) +#define IL_DL_STATE (1 << 3) +/* 0x000000F0 - 0x00000010 */ +#define IL_DL_MACDUMP (1 << 4) +#define IL_DL_HCMD_DUMP (1 << 5) +#define IL_DL_EEPROM (1 << 6) +#define IL_DL_RADIO (1 << 7) +/* 0x00000F00 - 0x00000100 */ +#define IL_DL_POWER (1 << 8) +#define IL_DL_TEMP (1 << 9) +#define IL_DL_NOTIF (1 << 10) +#define IL_DL_SCAN (1 << 11) +/* 0x0000F000 - 0x00001000 */ +#define IL_DL_ASSOC (1 << 12) +#define IL_DL_DROP (1 << 13) +#define IL_DL_TXPOWER (1 << 14) +#define IL_DL_AP (1 << 15) +/* 0x000F0000 - 0x00010000 */ +#define IL_DL_FW (1 << 16) +#define IL_DL_RF_KILL (1 << 17) +#define IL_DL_FW_ERRORS (1 << 18) +#define IL_DL_LED (1 << 19) +/* 0x00F00000 - 0x00100000 */ +#define IL_DL_RATE (1 << 20) +#define IL_DL_CALIB (1 << 21) +#define IL_DL_WEP (1 << 22) +#define IL_DL_TX (1 << 23) +/* 0x0F000000 - 0x01000000 */ +#define IL_DL_RX (1 << 24) +#define IL_DL_ISR (1 << 25) +#define IL_DL_HT (1 << 26) +/* 0xF0000000 - 0x10000000 */ +#define IL_DL_11H (1 << 28) +#define IL_DL_STATS (1 << 29) +#define IL_DL_TX_REPLY (1 << 30) +#define IL_DL_QOS (1 << 31) + +#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a) +#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a) +#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a) +#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a) +#define D_SCAN(f, a...) 
IL_DBG(IL_DL_SCAN, f, ## a) +#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a) +#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a) +#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a) +#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a) +#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a) +#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a) +#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a) +#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a) +#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a) +#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a) +#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a) +#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a) +#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a) +#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a) +#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a) +#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a) +#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a) +#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a) +#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a) +#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a) +#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a) +#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a) +#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a) +#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a) + +#endif /* __il_core_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h index 668a9616c26..9138e15004f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-csr.h +++ b/drivers/net/wireless/iwlegacy/csr.h @@ -60,8 +60,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -#ifndef __iwl_legacy_csr_h__ -#define __iwl_legacy_csr_h__ +#ifndef __il_csr_h__ +#define __il_csr_h__ /* * CSR (control and status registers) * @@ -70,9 +70,9 @@ * low power states due to driver-invoked device resets * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. * - * Use iwl_write32() and iwl_read32() family to access these registers; + * Use _il_wr() and _il_rd() family to access these registers; * these provide simple PCI bus access, without waking up the MAC. - * Do not use iwl_legacy_write_direct32() family for these registers; + * Do not use il_wr() family for these registers; * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. * The MAC (uCode processor, etc.) does not need to be powered up for accessing * the CSR registers. 
@@ -82,16 +82,16 @@ */ #define CSR_BASE (0x000) -#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ -#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ -#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ -#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ -#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ -#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ -#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */ #define CSR_GP_CNTRL (CSR_BASE+0x024) -/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ +/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */ #define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) /* @@ -166,26 +166,26 @@ #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ -#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ +#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ -#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ -#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ +#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */ +#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */ /* interrupt flags in INTA, set by uCode or hardware (e.g. dma), * acknowledged (reset) by host writing "1" to flagged bits. 
*/ -#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ -#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ -#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ -#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ -#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ -#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ -#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ -#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ -#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ -#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ -#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ +#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ +#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ +#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ #define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ CSR_INT_BIT_HW_ERR | \ @@ -197,21 +197,20 @@ CSR_INT_BIT_ALIVE) /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ -#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ -#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ -#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ -#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ -#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ -#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ -#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ -#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ +#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ +#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ +#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ #define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ CSR39_FH_INT_BIT_RX_CHNL2 | \ CSR_FH_INT_BIT_RX_CHNL1 | \ CSR_FH_INT_BIT_RX_CHNL0) - #define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ CSR_FH_INT_BIT_TX_CHNL1 | \ CSR_FH_INT_BIT_TX_CHNL0) @@ -285,7 +284,6 @@ #define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) - /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) @@ -293,19 +291,18 @@ #define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) /* EEPROM GP */ -#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ #define 
CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) /* GP REG */ -#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ +#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ #define CSR_GP_REG_NO_POWER_SAVE (0x00000000) #define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) #define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) #define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) - /* CSR GIO */ #define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) @@ -357,7 +354,7 @@ /* HPET MEM debug */ #define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) -/* DRAM INT TABLE */ +/* DRAM INT TBL */ #define CSR_DRAM_INT_TBL_ENABLE (1 << 31) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) @@ -368,13 +365,13 @@ * to indirectly access device's internal memory or registers that * may be powered-down. * - * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family + * Use il_wr()/il_rd() family * for these registers; * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ * to make sure the MAC (uCode processor, etc.) is powered up for accessing * internal resources. * - * Do not use iwl_write32()/iwl_read32() family to access these registers; + * Do not use _il_wr()/_il_rd() family to access these registers; * these provide only simple PCI bus access, without waking up the MAC. */ #define HBUS_BASE (0x400) @@ -411,12 +408,12 @@ #define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) /* - * Per-Tx-queue write pointer (index, really!) - * Indicates index to next TFD that driver will fill (1 past latest filled). + * Per-Tx-queue write pointer (idx, really!) + * Indicates idx to next TFD that driver will fill (1 past latest filled). * Bit usage: - * 0-7: queue write index + * 0-7: queue write idx * 11-8: queue selector */ #define HBUS_TARG_WRPTR (HBUS_BASE+0x060) -#endif /* !__iwl_legacy_csr_h__ */ +#endif /* !__il_csr_h__ */ diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c new file mode 100644 index 00000000000..b1b8926a9c7 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/debug.c @@ -0,0 +1,1411 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include <linux/ieee80211.h> +#include <linux/export.h> +#include <net/mac80211.h> + +#include "common.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, il, \ + &il_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ +static ssize_t il_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ +static ssize_t il_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos); + +static int +il_dbgfs_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations il_dbgfs_##name##_ops = { \ + .read = il_dbgfs_##name##_read, \ + .open = il_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations il_dbgfs_##name##_ops = { \ + .write = il_dbgfs_##name##_write, \ + .open = il_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations il_dbgfs_##name##_ops = { \ + .write = il_dbgfs_##name##_write, \ + .read = il_dbgfs_##name##_read, \ + .open = il_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t +il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + char *buf; + int pos = 0; + + int cnt; + ssize_t ret; + const size_t bufsz = + 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", + il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", + il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += + scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + il->tx_stats.data_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + il->tx_stats.data_bytes); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_clear_traffic_stats_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct il_priv *il = file->private_data; + u32 clear_flag; + char buf[8]; + int buf_size; + + 
memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &clear_flag) != 1) + return -EFAULT; + il_clear_traffic_stats(il); + + return count; +} + +static ssize_t +il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + char *buf; + int pos = 0; + int cnt; + ssize_t ret; + const size_t bufsz = + 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", + il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", + il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += + scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + il->rx_stats.data_cnt); + pos += + scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + il->rx_stats.data_bytes); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +#define BYTE1_MASK 0x000000ff; +#define BYTE2_MASK 0x0000ffff; +#define BYTE3_MASK 0x00ffffff; +static ssize_t +il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + u32 val; + char *buf; + ssize_t ret; + int i; + int pos = 0; + struct il_priv *il = file->private_data; + size_t bufsz; + + /* default is to dump the entire data segment */ + if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) { + il->dbgfs_sram_offset = 0x800000; + if (il->ucode_type == UCODE_INIT) + il->dbgfs_sram_len = il->ucode_init_data.len; + else + il->dbgfs_sram_len = il->ucode_data.len; + } + bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += + scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + il->dbgfs_sram_len); + pos += + scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + il->dbgfs_sram_offset); + for (i = il->dbgfs_sram_len; i > 0; i -= 4) { + val = + il_read_targ_mem(il, + il->dbgfs_sram_offset + + il->dbgfs_sram_len - i); + if (i < 4) { + switch (i) { + case 1: + val &= BYTE1_MASK; + break; + case 2: + val &= BYTE2_MASK; + break; + case 3: + val &= BYTE3_MASK; + break; + } + } + if (!(i % 16)) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_sram_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + il->dbgfs_sram_offset = offset; + il->dbgfs_sram_len = len; + } else { + il->dbgfs_sram_offset = 0; + il->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t +il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, + loff_t 
*ppos) +{ + struct il_priv *il = file->private_data; + struct il_station_entry *station; + int max_sta = il->hw_params.max_stations; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += + scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + il->num_stations); + + for (i = 0; i < max_sta; i++) { + station = &il->stations[i]; + if (!station->used) + continue; + pos += + scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", i, + station->sta.sta.addr, + station->sta.station_flags_msk); + pos += + scnprintf(buf + pos, bufsz - pos, + "TID\tseq_num\ttxq_id\tframes\ttfds\t"); + pos += + scnprintf(buf + pos, bufsz - pos, + "start_idx\tbitmap\t\t\trate_n_flags\n"); + + for (j = 0; j < MAX_TID_COUNT; j++) { + pos += + scnprintf(buf + pos, bufsz - pos, + "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", + j, station->tid[j].seq_number, + station->tid[j].agg.txq_id, + station->tid[j].agg.frame_count, + station->tid[j].tfds_in_queue, + station->tid[j].agg.start_idx, + station->tid[j].agg.bitmap, + station->tid[j].agg.rate_n_flags); + + if (station->tid[j].agg.wait_for_ba) + pos += + scnprintf(buf + pos, bufsz - pos, + " - waitforba"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct il_priv *il = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 eeprom_ver; + size_t eeprom_len = il->cfg->base_params->eeprom_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) { + IL_ERR("NVM size is not multiple of 16.\n"); + return -ENODATA; + } + + ptr = il->eeprom; + if (!ptr) { + IL_ERR("Invalid EEPROM memory\n"); + return -ENOMEM; + } + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION); + pos += + scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n", + eeprom_ver); + for (ofs = 0; ofs < eeprom_len; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos, + buf_size - pos, 0); + pos += strlen(buf + pos); + if (buf_size - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct il_priv *il = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + if (!test_bit(S_GEO_CONFIGURED, &il->status)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += + scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + 
pos += + scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i]. + flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i]. + flags & IEEE80211_CHAN_NO_IBSS) || + (channels[i]. + flags & IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i]. + flags & IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += + scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += + scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i]. + flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i]. + flags & IEEE80211_CHAN_NO_IBSS) || + (channels[i]. + flags & IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i]. + flags & IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += + scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n", + test_bit(S_HCMD_ACTIVE, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n", + test_bit(S_INT_ENABLED, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n", + test_bit(S_RF_KILL_HW, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n", + test_bit(S_CT_KILL, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n", + test_bit(S_INIT, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n", + test_bit(S_ALIVE, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n", + test_bit(S_READY, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n", + test_bit(S_TEMPERATURE, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n", + test_bit(S_GEO_CONFIGURED, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n", + test_bit(S_EXIT_PENDING, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n", + test_bit(S_STATS, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n", + test_bit(S_SCANNING, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n", + test_bit(S_SCAN_ABORTING, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n", + test_bit(S_SCAN_HW, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n", + test_bit(S_POWER_PMI, &il->status)); + pos += + scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n", + test_bit(S_FW_ERROR, &il->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, 
GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += + scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); + + pos += + scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + il->isr_stats.hw); + pos += + scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + il->isr_stats.sw); + if (il->isr_stats.sw || il->isr_stats.hw) { + pos += + scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + il->isr_stats.err_code); + } +#ifdef CONFIG_IWLEGACY_DEBUG + pos += + scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + il->isr_stats.sch); + pos += + scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + il->isr_stats.alive); +#endif + pos += + scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch toggled:\t %u\n", + il->isr_stats.rfkill); + + pos += + scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", + il->isr_stats.ctkill); + + pos += + scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + il->isr_stats.wakeup); + + pos += + scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n", + il->isr_stats.rx); + for (cnt = 0; cnt < IL_CN_MAX; cnt++) { + if (il->isr_stats.handlers[cnt] > 0) + pos += + scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + il_get_cmd_string(cnt), + il->isr_stats.handlers[cnt]); + } + + pos += + scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + il->isr_stats.tx); + + pos += + scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + il->isr_stats.unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + il_clear_isr_stats(il); + + return count; +} + +static ssize_t +il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct il_priv *il = file->private_data; + struct il_rxon_context *ctx = &il->ctx; + int pos = 0, i; + char buf[256]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid); + for (i = 0; i < AC_NUM; i++) { + pos += + scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += + scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + ctx->qos_data.def_qos_parm.ac[i].cw_min, + ctx->qos_data.def_qos_parm.ac[i].cw_max, + ctx->qos_data.def_qos_parm.ac[i].aifsn, + ctx->qos_data.def_qos_parm.ac[i].edca_txop); + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!il_is_any_associated(il)) + il->disable_ht40 = ht40 ? 
true : false; + else { + IL_ERR("Sta associated with AP - " + "Change to 40MHz channel support is not allowed\n"); + return -EINVAL; + } + + return count; +} + +static ssize_t +il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += + scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n", + il->disable_ht40 ? "Disabled" : "Enabled"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); + +static ssize_t +il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + int pos = 0, ofs = 0; + int cnt = 0, entry; + struct il_tx_queue *txq; + struct il_queue *q; + struct il_rx_queue *rxq = &il->rxq; + char *buf; + int bufsz = + ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) + + (il->cfg->base_params->num_of_queues * 32 * 8) + 400; + const u8 *ptr; + ssize_t ret; + + if (!il->txq) { + IL_ERR("txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate buffer\n"); + return -ENOMEM; + } + pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); + for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { + txq = &il->txq[cnt]; + q = &txq->q; + pos += + scnprintf(buf + pos, bufsz - pos, + "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt, + q->read_ptr, q->write_ptr); + } + if (il->tx_traffic && (il_debug_level & IL_DL_TX)) { + ptr = il->tx_traffic; + pos += + scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n", + il->tx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += + scnprintf(buf + pos, bufsz - pos, "0x%.4x ", + ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); + pos += + scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n", + rxq->read, rxq->write); + + if (il->rx_traffic && (il_debug_level & IL_DL_RX)) { + ptr = il->rx_traffic; + pos += + scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n", + il->rx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += + scnprintf(buf + pos, bufsz - pos, "0x%.4x ", + ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[8]; + int buf_size; + int traffic_log; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &traffic_log) != 1) + return -EFAULT; + if (traffic_log == 0) 
+ il_reset_traffic_log(il); + + return count; +} + +static ssize_t +il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + struct il_tx_queue *txq; + struct il_queue *q; + char *buf; + int pos = 0; + int cnt; + int ret; + const size_t bufsz = + sizeof(char) * 64 * il->cfg->base_params->num_of_queues; + + if (!il->txq) { + IL_ERR("txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { + txq = &il->txq[cnt]; + q = &txq->q; + pos += + scnprintf(buf + pos, bufsz - pos, + "hwq %.2d: read=%u write=%u stop=%d" + " swq_id=%#.2x (ac %d/hwq %d)\n", cnt, + q->read_ptr, q->write_ptr, + !!test_bit(cnt, il->queue_stopped), + txq->swq_id, txq->swq_id & 3, + (txq->swq_id >> 2) & 0x1f); + if (cnt >= 4) + continue; + /* for the ACs, display the stop count too */ + pos += + scnprintf(buf + pos, bufsz - pos, + " stop-count: %d\n", + atomic_read(&il->queue_stop_count[cnt])); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + struct il_rx_queue *rxq = &il->rxq; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write); + pos += + scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += + scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts-> + closed_rb_num) & 0x0FFF); + } else { + pos += + scnprintf(buf + pos, bufsz - pos, + "closed_rb_num: Not Allocated\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf, + count, ppos); +} + +static ssize_t +il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf, + count, ppos); +} + +static ssize_t +il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf, + count, ppos); +} + +static ssize_t +il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100; + ssize_t ret; + struct il_sensitivity_data *data; + + data = &il->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += + scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += + scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += + scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += + scnprintf(buf 
+ pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += + scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += + scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + data->auto_corr_cck_mrc); + pos += + scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += + scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += + scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += + scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += + scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + data->nrg_auto_corr_silence_diff); + pos += + scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + data->num_in_cck_no_fa); + pos += + scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100; + ssize_t ret; + struct il_chain_noise_data *data; + + data = &il->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IL_ERR("Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += + scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += + scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += + scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += + scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += + scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += + scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += + scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += + scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 
0; cnt < NUM_RX_CHAINS; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += + scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += + scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += + scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = + _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += + scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +il_dbgfs_clear_ucode_stats_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve stats information */ + mutex_lock(&il->mutex); + il_send_stats_request(il, CMD_SYNC, true); + mutex_unlock(&il->mutex); + + return count; +} + +static ssize_t +il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t +il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int len = 0; + char buf[20]; + + len = + sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t +il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -EFAULT; + + if (il->cfg->ops->lib->dump_fh) { + ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true); + if (buf) { + ret = + simple_read_from_buffer(user_buf, count, ppos, buf, + pos); + kfree(buf); + } + } + + return ret; +} + +static ssize_t +il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += + scnprintf(buf + pos, bufsz - pos, "%d\n", + il->missed_beacon_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t 
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct il_priv *il = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IL_MISSED_BEACON_THRESHOLD_MIN || + missed > IL_MISSED_BEACON_THRESHOLD_MAX) + il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; + else + il->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t +il_dbgfs_force_reset_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + int pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct il_force_reset *force_reset; + + force_reset = &il->force_reset; + + pos += + scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n", + force_reset->reset_request_count); + pos += + scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + force_reset->reset_success_count); + pos += + scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + force_reset->reset_reject_count); + pos += + scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n", + force_reset->reset_duration); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + + int ret; + struct il_priv *il = file->private_data; + + ret = il_force_reset(il, true); + + return ret ? ret : count; +} + +static ssize_t +il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct il_priv *il = file->private_data; + char buf[8]; + int buf_size; + int timeout; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &timeout) != 1) + return -EINVAL; + if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT) + timeout = IL_DEF_WD_TIMEOUT; + + il->cfg->base_params->wd_timeout = timeout; + il_setup_watchdog(il); + return count; +} + +DEBUGFS_READ_FILE_OPS(rx_stats); +DEBUGFS_READ_FILE_OPS(tx_stats); +DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_READ_FILE_OPS(tx_queue); +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats); +DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_READ_WRITE_FILE_OPS(force_reset); +DEBUGFS_READ_FILE_OPS(rxon_flags); +DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(wd_timeout); + +/* + * Create the debugfs files and directories + * + */ +int +il_dbgfs_register(struct il_priv *il, const char *name) +{ + struct dentry *phyd = il->hw->wiphy->debugfsdir; + struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + + dir_drv = debugfs_create_dir(name, phyd); + if (!dir_drv) + return -ENOMEM; + + il->debugfs_dir = dir_drv; + + dir_data = debugfs_create_dir("data", dir_drv); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dir_drv); + if (!dir_rf) + 
goto err; + dir_debug = debugfs_create_dir("debug", dir_drv); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + + if (il->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + if (il->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); + if (il->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, + &il->disable_sens_cal); + if (il->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, + &il->disable_chain_noise_cal); + DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal); + return 0; + +err: + IL_ERR("Can't create the debugfs directory\n"); + il_dbgfs_unregister(il); + return -ENOMEM; +} +EXPORT_SYMBOL(il_dbgfs_register); + +/** + * Remove the debugfs files and directories + * + */ +void +il_dbgfs_unregister(struct il_priv *il) +{ + if (!il->debugfs_dir) + return; + + debugfs_remove_recursive(il->debugfs_dir); + il->debugfs_dir = NULL; +} +EXPORT_SYMBOL(il_dbgfs_unregister); diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c deleted file mode 100644 index cfabb38793a..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c +++ /dev/null @@ -1,523 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include "iwl-3945-debugfs.h" - - -static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) -{ - int p = 0; - - p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", - le32_to_cpu(priv->_3945.statistics.flag)); - if (le32_to_cpu(priv->_3945.statistics.flag) & - UCODE_STATISTICS_CLEAR_MSK) - p += scnprintf(buf + p, bufsz - p, - "\tStatistics have been cleared\n"); - p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", - (le32_to_cpu(priv->_3945.statistics.flag) & - UCODE_STATISTICS_FREQUENCY_MSK) - ? "2.4 GHz" : "5.2 GHz"); - p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", - (le32_to_cpu(priv->_3945.statistics.flag) & - UCODE_STATISTICS_NARROW_BAND_MSK) - ? "enabled" : "disabled"); - return p; -} - -ssize_t iwl3945_ucode_rx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + - sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; - ssize_t ret; - struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, - *max_ofdm; - struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; - struct iwl39_statistics_rx_non_phy *general, *accum_general; - struct iwl39_statistics_rx_non_phy *delta_general, *max_general; - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * The statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - ofdm = &priv->_3945.statistics.rx.ofdm; - cck = &priv->_3945.statistics.rx.cck; - general = &priv->_3945.statistics.rx.general; - accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm; - accum_cck = &priv->_3945.accum_statistics.rx.cck; - accum_general = &priv->_3945.accum_statistics.rx.general; - delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm; - delta_cck = &priv->_3945.delta_statistics.rx.cck; - delta_general = &priv->_3945.delta_statistics.rx.general; - max_ofdm = &priv->_3945.max_delta.rx.ofdm; - max_cck = &priv->_3945.max_delta.rx.cck; - max_general = &priv->_3945.max_delta.rx.general; - - pos += iwl3945_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", - "Statistics_Rx - OFDM:"); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), - accum_ofdm->ina_cnt, - delta_ofdm->ina_cnt, max_ofdm->ina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "fina_cnt:", - le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, - delta_ofdm->fina_cnt, max_ofdm->fina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "plcp_err:", - le32_to_cpu(ofdm->plcp_err), 
accum_ofdm->plcp_err, - delta_ofdm->plcp_err, max_ofdm->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "crc32_err:", - le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, - delta_ofdm->crc32_err, max_ofdm->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "overrun_err:", - le32_to_cpu(ofdm->overrun_err), - accum_ofdm->overrun_err, delta_ofdm->overrun_err, - max_ofdm->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "early_overrun_err:", - le32_to_cpu(ofdm->early_overrun_err), - accum_ofdm->early_overrun_err, - delta_ofdm->early_overrun_err, - max_ofdm->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "crc32_good:", le32_to_cpu(ofdm->crc32_good), - accum_ofdm->crc32_good, delta_ofdm->crc32_good, - max_ofdm->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", - le32_to_cpu(ofdm->false_alarm_cnt), - accum_ofdm->false_alarm_cnt, - delta_ofdm->false_alarm_cnt, - max_ofdm->false_alarm_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "fina_sync_err_cnt:", - le32_to_cpu(ofdm->fina_sync_err_cnt), - accum_ofdm->fina_sync_err_cnt, - delta_ofdm->fina_sync_err_cnt, - max_ofdm->fina_sync_err_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sfd_timeout:", - le32_to_cpu(ofdm->sfd_timeout), - accum_ofdm->sfd_timeout, - delta_ofdm->sfd_timeout, - max_ofdm->sfd_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "fina_timeout:", - le32_to_cpu(ofdm->fina_timeout), - accum_ofdm->fina_timeout, - delta_ofdm->fina_timeout, - max_ofdm->fina_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "unresponded_rts:", - le32_to_cpu(ofdm->unresponded_rts), - accum_ofdm->unresponded_rts, - delta_ofdm->unresponded_rts, - max_ofdm->unresponded_rts); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "rxe_frame_lmt_ovrun:", - le32_to_cpu(ofdm->rxe_frame_limit_overrun), - accum_ofdm->rxe_frame_limit_overrun, - delta_ofdm->rxe_frame_limit_overrun, - max_ofdm->rxe_frame_limit_overrun); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sent_ack_cnt:", - le32_to_cpu(ofdm->sent_ack_cnt), - accum_ofdm->sent_ack_cnt, - delta_ofdm->sent_ack_cnt, - max_ofdm->sent_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sent_cts_cnt:", - le32_to_cpu(ofdm->sent_cts_cnt), - accum_ofdm->sent_cts_cnt, - delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); - - pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", - "Statistics_Rx - CCK:"); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "ina_cnt:", - le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, - delta_cck->ina_cnt, max_cck->ina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "fina_cnt:", - le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, - delta_cck->fina_cnt, max_cck->fina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "plcp_err:", - le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, - delta_cck->plcp_err, max_cck->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "crc32_err:", - le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, - delta_cck->crc32_err, 
max_cck->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "overrun_err:", - le32_to_cpu(cck->overrun_err), - accum_cck->overrun_err, - delta_cck->overrun_err, max_cck->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "early_overrun_err:", - le32_to_cpu(cck->early_overrun_err), - accum_cck->early_overrun_err, - delta_cck->early_overrun_err, - max_cck->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "crc32_good:", - le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, - delta_cck->crc32_good, - max_cck->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "false_alarm_cnt:", - le32_to_cpu(cck->false_alarm_cnt), - accum_cck->false_alarm_cnt, - delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "fina_sync_err_cnt:", - le32_to_cpu(cck->fina_sync_err_cnt), - accum_cck->fina_sync_err_cnt, - delta_cck->fina_sync_err_cnt, - max_cck->fina_sync_err_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sfd_timeout:", - le32_to_cpu(cck->sfd_timeout), - accum_cck->sfd_timeout, - delta_cck->sfd_timeout, max_cck->sfd_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "fina_timeout:", - le32_to_cpu(cck->fina_timeout), - accum_cck->fina_timeout, - delta_cck->fina_timeout, max_cck->fina_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "unresponded_rts:", - le32_to_cpu(cck->unresponded_rts), - accum_cck->unresponded_rts, - delta_cck->unresponded_rts, - max_cck->unresponded_rts); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "rxe_frame_lmt_ovrun:", - le32_to_cpu(cck->rxe_frame_limit_overrun), - accum_cck->rxe_frame_limit_overrun, - delta_cck->rxe_frame_limit_overrun, - max_cck->rxe_frame_limit_overrun); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sent_ack_cnt:", - le32_to_cpu(cck->sent_ack_cnt), - accum_cck->sent_ack_cnt, - delta_cck->sent_ack_cnt, - max_cck->sent_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sent_cts_cnt:", - le32_to_cpu(cck->sent_cts_cnt), - accum_cck->sent_cts_cnt, - delta_cck->sent_cts_cnt, - max_cck->sent_cts_cnt); - - pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", - "Statistics_Rx - GENERAL:"); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "bogus_cts:", - le32_to_cpu(general->bogus_cts), - accum_general->bogus_cts, - delta_general->bogus_cts, max_general->bogus_cts); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "bogus_ack:", - le32_to_cpu(general->bogus_ack), - accum_general->bogus_ack, - delta_general->bogus_ack, max_general->bogus_ack); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "non_bssid_frames:", - le32_to_cpu(general->non_bssid_frames), - accum_general->non_bssid_frames, - delta_general->non_bssid_frames, - max_general->non_bssid_frames); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "filtered_frames:", - le32_to_cpu(general->filtered_frames), - accum_general->filtered_frames, - delta_general->filtered_frames, - max_general->filtered_frames); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - 
"non_channel_beacons:", - le32_to_cpu(general->non_channel_beacons), - accum_general->non_channel_beacons, - delta_general->non_channel_beacons, - max_general->non_channel_beacons); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t iwl3945_ucode_tx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250; - ssize_t ret; - struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * The statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - tx = &priv->_3945.statistics.tx; - accum_tx = &priv->_3945.accum_statistics.tx; - delta_tx = &priv->_3945.delta_statistics.tx; - max_tx = &priv->_3945.max_delta.tx; - pos += iwl3945_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", - "Statistics_Tx:"); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "preamble:", - le32_to_cpu(tx->preamble_cnt), - accum_tx->preamble_cnt, - delta_tx->preamble_cnt, max_tx->preamble_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "rx_detected_cnt:", - le32_to_cpu(tx->rx_detected_cnt), - accum_tx->rx_detected_cnt, - delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "bt_prio_defer_cnt:", - le32_to_cpu(tx->bt_prio_defer_cnt), - accum_tx->bt_prio_defer_cnt, - delta_tx->bt_prio_defer_cnt, - max_tx->bt_prio_defer_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "bt_prio_kill_cnt:", - le32_to_cpu(tx->bt_prio_kill_cnt), - accum_tx->bt_prio_kill_cnt, - delta_tx->bt_prio_kill_cnt, - max_tx->bt_prio_kill_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "few_bytes_cnt:", - le32_to_cpu(tx->few_bytes_cnt), - accum_tx->few_bytes_cnt, - delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "cts_timeout:", - le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, - delta_tx->cts_timeout, max_tx->cts_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "ack_timeout:", - le32_to_cpu(tx->ack_timeout), - accum_tx->ack_timeout, - delta_tx->ack_timeout, max_tx->ack_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "expected_ack_cnt:", - le32_to_cpu(tx->expected_ack_cnt), - accum_tx->expected_ack_cnt, - delta_tx->expected_ack_cnt, - max_tx->expected_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "actual_ack_cnt:", - le32_to_cpu(tx->actual_ack_cnt), - accum_tx->actual_ack_cnt, - delta_tx->actual_ack_cnt, - max_tx->actual_ack_cnt); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t iwl3945_ucode_general_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct iwl39_statistics_general) * 
10 + 300; - ssize_t ret; - struct iwl39_statistics_general *general, *accum_general; - struct iwl39_statistics_general *delta_general, *max_general; - struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; - struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * The statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - general = &priv->_3945.statistics.general; - dbg = &priv->_3945.statistics.general.dbg; - div = &priv->_3945.statistics.general.div; - accum_general = &priv->_3945.accum_statistics.general; - delta_general = &priv->_3945.delta_statistics.general; - max_general = &priv->_3945.max_delta.general; - accum_dbg = &priv->_3945.accum_statistics.general.dbg; - delta_dbg = &priv->_3945.delta_statistics.general.dbg; - max_dbg = &priv->_3945.max_delta.general.dbg; - accum_div = &priv->_3945.accum_statistics.general.div; - delta_div = &priv->_3945.delta_statistics.general.div; - max_div = &priv->_3945.max_delta.general.div; - pos += iwl3945_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", - "Statistics_General:"); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "burst_check:", - le32_to_cpu(dbg->burst_check), - accum_dbg->burst_check, - delta_dbg->burst_check, max_dbg->burst_check); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "burst_count:", - le32_to_cpu(dbg->burst_count), - accum_dbg->burst_count, - delta_dbg->burst_count, max_dbg->burst_count); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "sleep_time:", - le32_to_cpu(general->sleep_time), - accum_general->sleep_time, - delta_general->sleep_time, max_general->sleep_time); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "slots_out:", - le32_to_cpu(general->slots_out), - accum_general->slots_out, - delta_general->slots_out, max_general->slots_out); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "slots_idle:", - le32_to_cpu(general->slots_idle), - accum_general->slots_idle, - delta_general->slots_idle, max_general->slots_idle); - pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", - le32_to_cpu(general->ttl_timestamp)); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "tx_on_a:", - le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, - delta_div->tx_on_a, max_div->tx_on_a); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "tx_on_b:", - le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, - delta_div->tx_on_b, max_div->tx_on_b); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "exec_time:", - le32_to_cpu(div->exec_time), accum_div->exec_time, - delta_div->exec_time, max_div->exec_time); - pos += scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "probe_time:", - le32_to_cpu(div->probe_time), accum_div->probe_time, - delta_div->probe_time, max_div->probe_time); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h deleted file mode 100644 index 
8fef4b32b44..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h +++ /dev/null @@ -1,60 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-debug.h" - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS -ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t iwl3945_ucode_general_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos); -#else -static ssize_t iwl3945_ucode_rx_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) -{ - return 0; -} -static ssize_t iwl3945_ucode_tx_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) -{ - return 0; -} -static ssize_t iwl3945_ucode_general_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - return 0; -} -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h deleted file mode 100644 index 836c9919f82..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h +++ /dev/null @@ -1,187 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. 
- * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_3945_fh_h__ -#define __iwl_3945_fh_h__ - -/************************************/ -/* iwl3945 Flow Handler Definitions */ -/************************************/ - -/** - * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) - * Addresses are offsets from device's PCI hardware base address. 
- */ -#define FH39_MEM_LOWER_BOUND (0x0800) -#define FH39_MEM_UPPER_BOUND (0x1000) - -#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140) -#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180) -#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400) -#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0) -#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500) -#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680) - -/* TFDB (Transmit Frame Buffer Descriptor) */ -#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \ - ((_ch) * 2 + (buf)) * 0x28) -#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch)) - -/* CBCC channel is [0,2] */ -#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8) -#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00) -#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04) - -/* RCSR channel is [0,2] */ -#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40) -#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00) -#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04) -#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20) -#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24) - -#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0)) - -/* RSSR */ -#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000) -#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004) - -/* TCSR */ -#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20) -#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00) -#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04) -#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08) - -/* TSSR */ -#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000) -#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008) -#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010) - - -/* DBM */ - -#define FH39_SRVC_CHNL (6) - -#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) -#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) - -#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) -#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) - -#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) - -#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) - -#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24) -#define 
FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16) - -#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \ - (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \ - FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)) - -#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) - -struct iwl3945_tfd_tb { - __le32 addr; - __le32 len; -} __packed; - -struct iwl3945_tfd { - __le32 control_flags; - struct iwl3945_tfd_tb tbs[4]; - u8 __pad[28]; -} __packed; - - -#endif /* __iwl_3945_fh_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h deleted file mode 100644 index 5c3a68d3af1..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h +++ /dev/null @@ -1,291 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -/* - * Please use this file (iwl-3945-hw.h) only for hardware-related definitions. - * Please use iwl-commands.h for uCode API definitions. - * Please use iwl-3945.h for driver implementation definitions. - */ - -#ifndef __iwl_3945_hw__ -#define __iwl_3945_hw__ - -#include "iwl-eeprom.h" - -/* RSSI to dBm */ -#define IWL39_RSSI_OFFSET 95 - -/* - * EEPROM related constants, enums, and structures. - */ -#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) - -/* - * Mapping of a Tx power level, at factory calibration temperature, - * to a radio/DSP gain table index. - * One for each of 5 "sample" power levels in each band. - * v_det is measured at the factory, using the 3945's built-in power amplifier - * (PA) output voltage detector. This same detector is used during Tx of - * long packets in normal operation to provide feedback as to proper output - * level. - * Data copied from EEPROM. - * DO NOT ALTER THIS STRUCTURE!!! - */ -struct iwl3945_eeprom_txpower_sample { - u8 gain_index; /* index into power (gain) setup table ... */ - s8 power; /* ... for this pwr level for this chnl group */ - u16 v_det; /* PA output voltage */ -} __packed; - -/* - * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. - * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). - * Tx power setup code interpolates between the 5 "sample" power levels - * to determine the nominal setup for a requested power level. - * Data copied from EEPROM. - * DO NOT ALTER THIS STRUCTURE!!! - */ -struct iwl3945_eeprom_txpower_group { - struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */ - s32 a, b, c, d, e; /* coefficients for voltage->power - * formula (signed) */ - s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on - * frequency (signed) */ - s8 saturation_power; /* highest power possible by h/w in this - * band */ - u8 group_channel; /* "representative" channel # in this band */ - s16 temperature; /* h/w temperature at factory calib this band - * (signed) */ -} __packed; - -/* - * Temperature-based Tx-power compensation data, not band-specific. - * These coefficients are use to modify a/b/c/d/e coeffs based on - * difference between current temperature and factory calib temperature. - * Data copied from EEPROM. 
- */ -struct iwl3945_eeprom_temperature_corr { - u32 Ta; - u32 Tb; - u32 Tc; - u32 Td; - u32 Te; -} __packed; - -/* - * EEPROM map - */ -struct iwl3945_eeprom { - u8 reserved0[16]; - u16 device_id; /* abs.ofs: 16 */ - u8 reserved1[2]; - u16 pmc; /* abs.ofs: 20 */ - u8 reserved2[20]; - u8 mac_address[6]; /* abs.ofs: 42 */ - u8 reserved3[58]; - u16 board_revision; /* abs.ofs: 106 */ - u8 reserved4[11]; - u8 board_pba_number[9]; /* abs.ofs: 119 */ - u8 reserved5[8]; - u16 version; /* abs.ofs: 136 */ - u8 sku_cap; /* abs.ofs: 138 */ - u8 leds_mode; /* abs.ofs: 139 */ - u16 oem_mode; - u16 wowlan_mode; /* abs.ofs: 142 */ - u16 leds_time_interval; /* abs.ofs: 144 */ - u8 leds_off_time; /* abs.ofs: 146 */ - u8 leds_on_time; /* abs.ofs: 147 */ - u8 almgor_m_version; /* abs.ofs: 148 */ - u8 antenna_switch_type; /* abs.ofs: 149 */ - u8 reserved6[42]; - u8 sku_id[4]; /* abs.ofs: 192 */ - -/* - * Per-channel regulatory data. - * - * Each channel that *might* be supported by 3945 has a fixed location - * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory - * txpower (MSB). - * - * Entries immediately below are for 20 MHz channel width. - * - * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 - */ - u16 band_1_count; /* abs.ofs: 196 */ - struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */ - -/* - * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, - * 5.0 GHz channels 7, 8, 11, 12, 16 - * (4915-5080MHz) (none of these is ever supported) - */ - u16 band_2_count; /* abs.ofs: 226 */ - struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ - -/* - * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 - * (5170-5320MHz) - */ - u16 band_3_count; /* abs.ofs: 254 */ - struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ - -/* - * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 - * (5500-5700MHz) - */ - u16 band_4_count; /* abs.ofs: 280 */ - struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ - -/* - * 5.7 GHz channels 145, 149, 153, 157, 161, 165 - * (5725-5825MHz) - */ - u16 band_5_count; /* abs.ofs: 304 */ - struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ - - u8 reserved9[194]; - -/* - * 3945 Txpower calibration data. - */ -#define IWL_NUM_TX_CALIB_GROUPS 5 - struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; -/* abs.ofs: 512 */ - struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ - u8 reserved16[172]; /* fill out to full 1024 byte block */ -} __packed; - -#define IWL3945_EEPROM_IMG_SIZE 1024 - -/* End of EEPROM */ - -#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ -#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ - -/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ -#define IWL39_NUM_QUEUES 5 -#define IWL39_CMD_QUEUE_NUM 4 - -#define IWL_DEFAULT_TX_RETRY 15 - -/*********************************************/ - -#define RFD_SIZE 4 -#define NUM_TFD_CHUNKS 4 - -#define RX_QUEUE_SIZE 256 -#define RX_QUEUE_MASK 255 -#define RX_QUEUE_SIZE_LOG 8 - -#define U32_PAD(n) ((4-(n))&0x3) - -#define TFD_CTL_COUNT_SET(n) (n << 24) -#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7) -#define TFD_CTL_PAD_SET(n) (n << 28) -#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) - -/* Sizes and addresses for instruction and data memory (SRAM) in - * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. 
*/ -#define IWL39_RTC_INST_LOWER_BOUND (0x000000) -#define IWL39_RTC_INST_UPPER_BOUND (0x014000) - -#define IWL39_RTC_DATA_LOWER_BOUND (0x800000) -#define IWL39_RTC_DATA_UPPER_BOUND (0x808000) - -#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \ - IWL39_RTC_INST_LOWER_BOUND) -#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \ - IWL39_RTC_DATA_LOWER_BOUND) - -#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE -#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE - -/* Size of uCode instruction memory in bootstrap state machine */ -#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE - -static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) -{ - return (addr >= IWL39_RTC_DATA_LOWER_BOUND) && - (addr < IWL39_RTC_DATA_UPPER_BOUND); -} - -/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE - * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ -struct iwl3945_shared { - __le32 tx_base_ptr[8]; -} __packed; - -static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) -{ - return le16_to_cpu(rate_n_flags) & 0xFF; -} - -static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags) -{ - return le16_to_cpu(rate_n_flags); -} - -static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags) -{ - return cpu_to_le16((u16)rate|flags); -} -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c deleted file mode 100644 index 7a7f0f38c8a..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c +++ /dev/null @@ -1,63 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> - -#include "iwl-commands.h" -#include "iwl-3945.h" -#include "iwl-core.h" -#include "iwl-dev.h" -#include "iwl-3945-led.h" - - -/* Send led command */ -static int iwl3945_send_led_cmd(struct iwl_priv *priv, - struct iwl_led_cmd *led_cmd) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_LEDS_CMD, - .len = sizeof(struct iwl_led_cmd), - .data = led_cmd, - .flags = CMD_ASYNC, - .callback = NULL, - }; - - return iwl_legacy_send_cmd(priv, &cmd); -} - -const struct iwl_led_ops iwl3945_led_ops = { - .cmd = iwl3945_send_led_cmd, -}; diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h deleted file mode 100644 index 96716276eb0..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h +++ /dev/null @@ -1,32 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_3945_led_h__ -#define __iwl_3945_led_h__ - -extern const struct iwl_led_ops iwl3945_led_ops; - -#endif /* __iwl_3945_led_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c deleted file mode 100644 index 8faeaf2ddde..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c +++ /dev/null @@ -1,996 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <net/mac80211.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/delay.h> - -#include <linux/workqueue.h> - -#include "iwl-commands.h" -#include "iwl-3945.h" -#include "iwl-sta.h" - -#define RS_NAME "iwl-3945-rs" - -static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = { - 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 -}; - -static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = { - 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 -}; - -static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = { - 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 -}; - -static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = { - 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 -}; - -struct iwl3945_tpt_entry { - s8 min_rssi; - u8 index; -}; - -static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = { - {-60, IWL_RATE_54M_INDEX}, - {-64, IWL_RATE_48M_INDEX}, - {-72, IWL_RATE_36M_INDEX}, - {-80, IWL_RATE_24M_INDEX}, - {-84, IWL_RATE_18M_INDEX}, - {-85, IWL_RATE_12M_INDEX}, - {-87, IWL_RATE_9M_INDEX}, - {-89, IWL_RATE_6M_INDEX} -}; - -static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { - {-60, IWL_RATE_54M_INDEX}, - {-64, IWL_RATE_48M_INDEX}, - {-68, IWL_RATE_36M_INDEX}, - {-80, IWL_RATE_24M_INDEX}, - {-84, IWL_RATE_18M_INDEX}, - {-85, IWL_RATE_12M_INDEX}, - {-86, IWL_RATE_11M_INDEX}, - {-88, IWL_RATE_5M_INDEX}, - {-90, IWL_RATE_2M_INDEX}, - {-92, IWL_RATE_1M_INDEX} -}; - -#define IWL_RATE_MAX_WINDOW 62 -#define IWL_RATE_FLUSH (3*HZ) -#define IWL_RATE_WIN_FLUSH (HZ/2) -#define IWL39_RATE_HIGH_TH 11520 -#define IWL_SUCCESS_UP_TH 8960 -#define IWL_SUCCESS_DOWN_TH 10880 -#define IWL_RATE_MIN_FAILURE_TH 6 -#define IWL_RATE_MIN_SUCCESS_TH 8 -#define IWL_RATE_DECREASE_TH 1920 -#define IWL_RATE_RETRY_TH 15 - -static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band) -{ - u32 index = 0; - u32 table_size = 0; - struct iwl3945_tpt_entry *tpt_table = NULL; - - if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) - rssi = IWL_MIN_RSSI_VAL; - - switch (band) { - case IEEE80211_BAND_2GHZ: - tpt_table = iwl3945_tpt_table_g; - table_size = ARRAY_SIZE(iwl3945_tpt_table_g); - break; - - case IEEE80211_BAND_5GHZ: - tpt_table = iwl3945_tpt_table_a; - table_size = ARRAY_SIZE(iwl3945_tpt_table_a); - break; - - default: - BUG(); - break; - } - - while ((index < table_size) && (rssi < tpt_table[index].min_rssi)) - index++; - - index = min(index, (table_size - 1)); - - return tpt_table[index].index; -} - -static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window) -{ - window->data = 0; - window->success_counter = 0; - window->success_ratio = -1; - window->counter = 0; - window->average_tpt = IWL_INVALID_VALUE; - window->stamp = 0; -} - -/** - * iwl3945_rate_scale_flush_windows - flush out the rate scale windows - * - * Returns the number of windows that have gathered data but were - * not 
flushed. If there were any that were not flushed, then - * reschedule the rate flushing routine. - */ -static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta) -{ - int unflushed = 0; - int i; - unsigned long flags; - struct iwl_priv *priv __maybe_unused = rs_sta->priv; - - /* - * For each rate, if we have collected data on that rate - * and it has been more than IWL_RATE_WIN_FLUSH - * since we flushed, clear out the gathered statistics - */ - for (i = 0; i < IWL_RATE_COUNT_3945; i++) { - if (!rs_sta->win[i].counter) - continue; - - spin_lock_irqsave(&rs_sta->lock, flags); - if (time_after(jiffies, rs_sta->win[i].stamp + - IWL_RATE_WIN_FLUSH)) { - IWL_DEBUG_RATE(priv, "flushing %d samples of rate " - "index %d\n", - rs_sta->win[i].counter, i); - iwl3945_clear_window(&rs_sta->win[i]); - } else - unflushed++; - spin_unlock_irqrestore(&rs_sta->lock, flags); - } - - return unflushed; -} - -#define IWL_RATE_FLUSH_MAX 5000 /* msec */ -#define IWL_RATE_FLUSH_MIN 50 /* msec */ -#define IWL_AVERAGE_PACKETS 1500 - -static void iwl3945_bg_rate_scale_flush(unsigned long data) -{ - struct iwl3945_rs_sta *rs_sta = (void *)data; - struct iwl_priv *priv __maybe_unused = rs_sta->priv; - int unflushed = 0; - unsigned long flags; - u32 packet_count, duration, pps; - - IWL_DEBUG_RATE(priv, "enter\n"); - - unflushed = iwl3945_rate_scale_flush_windows(rs_sta); - - spin_lock_irqsave(&rs_sta->lock, flags); - - /* Number of packets Rx'd since last time this timer ran */ - packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; - - rs_sta->last_tx_packets = rs_sta->tx_packets + 1; - - if (unflushed) { - duration = - jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); - - IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n", - packet_count, duration); - - /* Determine packets per second */ - if (duration) - pps = (packet_count * 1000) / duration; - else - pps = 0; - - if (pps) { - duration = (IWL_AVERAGE_PACKETS * 1000) / pps; - if (duration < IWL_RATE_FLUSH_MIN) - duration = IWL_RATE_FLUSH_MIN; - else if (duration > IWL_RATE_FLUSH_MAX) - duration = IWL_RATE_FLUSH_MAX; - } else - duration = IWL_RATE_FLUSH_MAX; - - rs_sta->flush_time = msecs_to_jiffies(duration); - - IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n", - duration, packet_count); - - mod_timer(&rs_sta->rate_scale_flush, jiffies + - rs_sta->flush_time); - - rs_sta->last_partial_flush = jiffies; - } else { - rs_sta->flush_time = IWL_RATE_FLUSH; - rs_sta->flush_pending = 0; - } - /* If there weren't any unflushed entries, we don't schedule the timer - * to run again */ - - rs_sta->last_flush = jiffies; - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - IWL_DEBUG_RATE(priv, "leave\n"); -} - -/** - * iwl3945_collect_tx_data - Update the success/failure sliding window - * - * We keep a sliding window of the last 64 packets transmitted - * at this rate. window->data contains the bitmask of successful - * packets. - */ -static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta, - struct iwl3945_rate_scale_data *window, - int success, int retries, int index) -{ - unsigned long flags; - s32 fail_count; - struct iwl_priv *priv __maybe_unused = rs_sta->priv; - - if (!retries) { - IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n"); - return; - } - - spin_lock_irqsave(&rs_sta->lock, flags); - - /* - * Keep track of only the latest 62 tx frame attempts in this rate's - * history window; anything older isn't really relevant any more. 
- * If we have filled up the sliding window, drop the oldest attempt; - * if the oldest attempt (highest bit in bitmap) shows "success", - * subtract "1" from the success counter (this is the main reason - * we keep these bitmaps!). - * */ - while (retries > 0) { - if (window->counter >= IWL_RATE_MAX_WINDOW) { - - /* remove earliest */ - window->counter = IWL_RATE_MAX_WINDOW - 1; - - if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) { - window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1)); - window->success_counter--; - } - } - - /* Increment frames-attempted counter */ - window->counter++; - - /* Shift bitmap by one frame (throw away oldest history), - * OR in "1", and increment "success" if this - * frame was successful. */ - window->data <<= 1; - if (success > 0) { - window->success_counter++; - window->data |= 0x1; - success--; - } - - retries--; - } - - /* Calculate current success ratio, avoid divide-by-0! */ - if (window->counter > 0) - window->success_ratio = 128 * (100 * window->success_counter) - / window->counter; - else - window->success_ratio = IWL_INVALID_VALUE; - - fail_count = window->counter - window->success_counter; - - /* Calculate average throughput, if we have enough history. */ - if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || - (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) - window->average_tpt = ((window->success_ratio * - rs_sta->expected_tpt[index] + 64) / 128); - else - window->average_tpt = IWL_INVALID_VALUE; - - /* Tag this window as having been updated */ - window->stamp = jiffies; - - spin_unlock_irqrestore(&rs_sta->lock, flags); - -} - -/* - * Called after adding a new station to initialize rate scaling - */ -void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id) -{ - struct ieee80211_hw *hw = priv->hw; - struct ieee80211_conf *conf = &priv->hw->conf; - struct iwl3945_sta_priv *psta; - struct iwl3945_rs_sta *rs_sta; - struct ieee80211_supported_band *sband; - int i; - - IWL_DEBUG_INFO(priv, "enter\n"); - if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) - goto out; - - psta = (struct iwl3945_sta_priv *) sta->drv_priv; - rs_sta = &psta->rs_sta; - sband = hw->wiphy->bands[conf->channel->band]; - - rs_sta->priv = priv; - - rs_sta->start_rate = IWL_RATE_INVALID; - - /* default to just 802.11b */ - rs_sta->expected_tpt = iwl3945_expected_tpt_b; - - rs_sta->last_partial_flush = jiffies; - rs_sta->last_flush = jiffies; - rs_sta->flush_time = IWL_RATE_FLUSH; - rs_sta->last_tx_packets = 0; - - rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; - rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; - - for (i = 0; i < IWL_RATE_COUNT_3945; i++) - iwl3945_clear_window(&rs_sta->win[i]); - - /* TODO: what is a good starting rate for STA? About middle? Maybe not - * the lowest or the highest rate.. Could consider using RSSI from - * previous packets? Need to have IEEE 802.1X auth succeed immediately - * after assoc.. 
*/ - - for (i = sband->n_bitrates - 1; i >= 0; i--) { - if (sta->supp_rates[sband->band] & (1 << i)) { - rs_sta->last_txrate_idx = i; - break; - } - } - - priv->_3945.sta_supp_rates = sta->supp_rates[sband->band]; - /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ - if (sband->band == IEEE80211_BAND_5GHZ) { - rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; - priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates << - IWL_FIRST_OFDM_RATE; - } - -out: - priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; - - IWL_DEBUG_INFO(priv, "leave\n"); -} - -static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - return hw->priv; -} - -/* rate scale requires free function to be implemented */ -static void iwl3945_rs_free(void *priv) -{ - return; -} - -static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) -{ - struct iwl3945_rs_sta *rs_sta; - struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; - struct iwl_priv *priv __maybe_unused = iwl_priv; - - IWL_DEBUG_RATE(priv, "enter\n"); - - rs_sta = &psta->rs_sta; - - spin_lock_init(&rs_sta->lock); - init_timer(&rs_sta->rate_scale_flush); - - IWL_DEBUG_RATE(priv, "leave\n"); - - return rs_sta; -} - -static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, - void *priv_sta) -{ - struct iwl3945_rs_sta *rs_sta = priv_sta; - - /* - * Be careful not to use any members of iwl3945_rs_sta (like trying - * to use iwl_priv to print out debugging) since it may not be fully - * initialized at this point. - */ - del_timer_sync(&rs_sta->rate_scale_flush); -} - - -/** - * iwl3945_rs_tx_status - Update rate control values based on Tx results - * - * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by - * the hardware for each rate. - */ -static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) -{ - s8 retries = 0, current_count; - int scale_rate_index, first_index, last_index; - unsigned long flags; - struct iwl_priv *priv = (struct iwl_priv *)priv_rate; - struct iwl3945_rs_sta *rs_sta = priv_sta; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - IWL_DEBUG_RATE(priv, "enter\n"); - - retries = info->status.rates[0].count; - /* Sanity Check for retries */ - if (retries > IWL_RATE_RETRY_TH) - retries = IWL_RATE_RETRY_TH; - - first_index = sband->bitrates[info->status.rates[0].idx].hw_value; - if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) { - IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index); - return; - } - - if (!priv_sta) { - IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n"); - return; - } - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (!rs_sta->priv) { - IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n"); - return; - } - - - rs_sta->tx_packets++; - - scale_rate_index = first_index; - last_index = first_index; - - /* - * Update the window for each rate. We determine which rates - * were Tx'd based on the total number of retries vs. the number - * of retries configured for each rate -- currently set to the - * priv value 'retry_rate' vs. 
rate specific - * - * On exit from this while loop last_index indicates the rate - * at which the frame was finally transmitted (or failed if no - * ACK) - */ - while (retries > 1) { - if ((retries - 1) < priv->retry_rate) { - current_count = (retries - 1); - last_index = scale_rate_index; - } else { - current_count = priv->retry_rate; - last_index = iwl3945_rs_next_rate(priv, - scale_rate_index); - } - - /* Update this rate accounting for as many retries - * as was used for it (per current_count) */ - iwl3945_collect_tx_data(rs_sta, - &rs_sta->win[scale_rate_index], - 0, current_count, scale_rate_index); - IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n", - scale_rate_index, current_count); - - retries -= current_count; - - scale_rate_index = last_index; - } - - - /* Update the last index window with success/failure based on ACK */ - IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n", - last_index, - (info->flags & IEEE80211_TX_STAT_ACK) ? - "success" : "failure"); - iwl3945_collect_tx_data(rs_sta, - &rs_sta->win[last_index], - info->flags & IEEE80211_TX_STAT_ACK, 1, last_index); - - /* We updated the rate scale window -- if its been more than - * flush_time since the last run, schedule the flush - * again */ - spin_lock_irqsave(&rs_sta->lock, flags); - - if (!rs_sta->flush_pending && - time_after(jiffies, rs_sta->last_flush + - rs_sta->flush_time)) { - - rs_sta->last_partial_flush = jiffies; - rs_sta->flush_pending = 1; - mod_timer(&rs_sta->rate_scale_flush, - jiffies + rs_sta->flush_time); - } - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - IWL_DEBUG_RATE(priv, "leave\n"); -} - -static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, - u8 index, u16 rate_mask, enum ieee80211_band band) -{ - u8 high = IWL_RATE_INVALID; - u8 low = IWL_RATE_INVALID; - struct iwl_priv *priv __maybe_unused = rs_sta->priv; - - /* 802.11A walks to the next literal adjacent rate in - * the rate table */ - if (unlikely(band == IEEE80211_BAND_5GHZ)) { - int i; - u32 mask; - - /* Find the previous rate that is in the rate mask */ - i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { - if (rate_mask & mask) { - low = i; - break; - } - } - - /* Find the next rate that is in the rate mask */ - i = index + 1; - for (mask = (1 << i); i < IWL_RATE_COUNT_3945; - i++, mask <<= 1) { - if (rate_mask & mask) { - high = i; - break; - } - } - - return (high << 8) | low; - } - - low = index; - while (low != IWL_RATE_INVALID) { - if (rs_sta->tgg) - low = iwl3945_rates[low].prev_rs_tgg; - else - low = iwl3945_rates[low].prev_rs; - if (low == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << low)) - break; - IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); - } - - high = index; - while (high != IWL_RATE_INVALID) { - if (rs_sta->tgg) - high = iwl3945_rates[high].next_rs_tgg; - else - high = iwl3945_rates[high].next_rs; - if (high == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << high)) - break; - IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); - } - - return (high << 8) | low; -} - -/** - * iwl3945_rs_get_rate - find the rate for the requested packet - * - * Returns the ieee80211_rate structure allocated by the driver. - * - * The rate control algorithm has no internal mapping between hw_mode's - * rate ordering and the rate ordering used by the rate control algorithm. - * - * The rate control algorithm uses a single table of rates that goes across - * the entire A/B/G spectrum vs. being limited to just one particular - * hw_mode. 
- * - * As such, we can't convert the index obtained below into the hw_mode's - * rate table and must reference the driver allocated rate table - * - */ -static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, - void *priv_sta, struct ieee80211_tx_rate_control *txrc) -{ - struct ieee80211_supported_band *sband = txrc->sband; - struct sk_buff *skb = txrc->skb; - u8 low = IWL_RATE_INVALID; - u8 high = IWL_RATE_INVALID; - u16 high_low; - int index; - struct iwl3945_rs_sta *rs_sta = priv_sta; - struct iwl3945_rate_scale_data *window = NULL; - int current_tpt = IWL_INVALID_VALUE; - int low_tpt = IWL_INVALID_VALUE; - int high_tpt = IWL_INVALID_VALUE; - u32 fail_count; - s8 scale_action = 0; - unsigned long flags; - u16 rate_mask; - s8 max_rate_idx = -1; - struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - IWL_DEBUG_RATE(priv, "enter\n"); - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (rs_sta && !rs_sta->priv) { - IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n"); - priv_sta = NULL; - } - - if (rate_control_send_low(sta, priv_sta, txrc)) - return; - - rate_mask = sta->supp_rates[sband->band]; - - /* get user max rate if set */ - max_rate_idx = txrc->max_rate_idx; - if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1)) - max_rate_idx += IWL_FIRST_OFDM_RATE; - if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT)) - max_rate_idx = -1; - - index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1); - - if (sband->band == IEEE80211_BAND_5GHZ) - rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; - - spin_lock_irqsave(&rs_sta->lock, flags); - - /* for recent assoc, choose best rate regarding - * to rssi value - */ - if (rs_sta->start_rate != IWL_RATE_INVALID) { - if (rs_sta->start_rate < index && - (rate_mask & (1 << rs_sta->start_rate))) - index = rs_sta->start_rate; - rs_sta->start_rate = IWL_RATE_INVALID; - } - - /* force user max rate if set by user */ - if ((max_rate_idx != -1) && (max_rate_idx < index)) { - if (rate_mask & (1 << max_rate_idx)) - index = max_rate_idx; - } - - window = &(rs_sta->win[index]); - - fail_count = window->counter - window->success_counter; - - if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && - (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { - spin_unlock_irqrestore(&rs_sta->lock, flags); - - IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: " - "counter: %d, success_counter: %d, " - "expected_tpt is %sNULL\n", - index, - window->counter, - window->success_counter, - rs_sta->expected_tpt ? 
"not " : ""); - - /* Can't calculate this yet; not enough history */ - window->average_tpt = IWL_INVALID_VALUE; - goto out; - - } - - current_tpt = window->average_tpt; - - high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, - sband->band); - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* If user set max rate, dont allow higher than user constrain */ - if ((max_rate_idx != -1) && (max_rate_idx < high)) - high = IWL_RATE_INVALID; - - /* Collect Measured throughputs of adjacent rates */ - if (low != IWL_RATE_INVALID) - low_tpt = rs_sta->win[low].average_tpt; - - if (high != IWL_RATE_INVALID) - high_tpt = rs_sta->win[high].average_tpt; - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - scale_action = 0; - - /* Low success ratio , need to drop the rate */ - if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { - IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); - scale_action = -1; - /* No throughput measured yet for adjacent rates, - * try increase */ - } else if ((low_tpt == IWL_INVALID_VALUE) && - (high_tpt == IWL_INVALID_VALUE)) { - - if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH) - scale_action = 1; - else if (low != IWL_RATE_INVALID) - scale_action = 0; - - /* Both adjacent throughputs are measured, but neither one has - * better throughput; we're using the best rate, don't change - * it! */ - } else if ((low_tpt != IWL_INVALID_VALUE) && - (high_tpt != IWL_INVALID_VALUE) && - (low_tpt < current_tpt) && (high_tpt < current_tpt)) { - - IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < " - "current_tpt [%d]\n", - low_tpt, high_tpt, current_tpt); - scale_action = 0; - - /* At least one of the rates has better throughput */ - } else { - if (high_tpt != IWL_INVALID_VALUE) { - - /* High rate has better throughput, Increase - * rate */ - if (high_tpt > current_tpt && - window->success_ratio >= IWL_RATE_INCREASE_TH) - scale_action = 1; - else { - IWL_DEBUG_RATE(priv, - "decrease rate because of high tpt\n"); - scale_action = 0; - } - } else if (low_tpt != IWL_INVALID_VALUE) { - if (low_tpt > current_tpt) { - IWL_DEBUG_RATE(priv, - "decrease rate because of low tpt\n"); - scale_action = -1; - } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) { - /* Lower rate has better - * throughput,decrease rate */ - scale_action = 1; - } - } - } - - /* Sanity check; asked for decrease, but success rate or throughput - * has been good at old rate. Don't change it. 
*/ - if ((scale_action == -1) && (low != IWL_RATE_INVALID) && - ((window->success_ratio > IWL_RATE_HIGH_TH) || - (current_tpt > (100 * rs_sta->expected_tpt[low])))) - scale_action = 0; - - switch (scale_action) { - case -1: - - /* Decrese rate */ - if (low != IWL_RATE_INVALID) - index = low; - break; - - case 1: - /* Increase rate */ - if (high != IWL_RATE_INVALID) - index = high; - - break; - - case 0: - default: - /* No change */ - break; - } - - IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n", - index, scale_action, low, high); - - out: - - if (sband->band == IEEE80211_BAND_5GHZ) { - if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) - index = IWL_FIRST_OFDM_RATE; - rs_sta->last_txrate_idx = index; - info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; - } else { - rs_sta->last_txrate_idx = index; - info->control.rates[0].idx = rs_sta->last_txrate_idx; - } - - IWL_DEBUG_RATE(priv, "leave: %d\n", index); -} - -#ifdef CONFIG_MAC80211_DEBUGFS -static int iwl3945_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int j; - ssize_t ret; - struct iwl3945_rs_sta *lq_sta = file->private_data; - - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n" - "rate=0x%X flush time %d\n", - lq_sta->tx_packets, - lq_sta->last_txrate_idx, - lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time)); - for (j = 0; j < IWL_RATE_COUNT_3945; j++) { - desc += sprintf(buff+desc, - "counter=%d success=%d %%=%d\n", - lq_sta->win[j].counter, - lq_sta->win[j].success_counter, - lq_sta->win[j].success_ratio); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_stats_table_ops = { - .read = iwl3945_sta_dbgfs_stats_table_read, - .open = iwl3945_open_file_generic, - .llseek = default_llseek, -}; - -static void iwl3945_add_debugfs(void *priv, void *priv_sta, - struct dentry *dir) -{ - struct iwl3945_rs_sta *lq_sta = priv_sta; - - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", 0600, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); - -} - -static void iwl3945_remove_debugfs(void *priv, void *priv_sta) -{ - struct iwl3945_rs_sta *lq_sta = priv_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); -} -#endif - -/* - * Initialization of rate scaling information is done by driver after - * the station is added. Since mac80211 calls this function before a - * station is added we ignore it. 
- */ -static void iwl3945_rs_rate_init_stub(void *priv_r, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta) -{ -} - -static struct rate_control_ops rs_ops = { - .module = NULL, - .name = RS_NAME, - .tx_status = iwl3945_rs_tx_status, - .get_rate = iwl3945_rs_get_rate, - .rate_init = iwl3945_rs_rate_init_stub, - .alloc = iwl3945_rs_alloc, - .free = iwl3945_rs_free, - .alloc_sta = iwl3945_rs_alloc_sta, - .free_sta = iwl3945_rs_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = iwl3945_add_debugfs, - .remove_sta_debugfs = iwl3945_remove_debugfs, -#endif - -}; -void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) -{ - struct iwl_priv *priv = hw->priv; - s32 rssi = 0; - unsigned long flags; - struct iwl3945_rs_sta *rs_sta; - struct ieee80211_sta *sta; - struct iwl3945_sta_priv *psta; - - IWL_DEBUG_RATE(priv, "enter\n"); - - rcu_read_lock(); - - sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif, - priv->stations[sta_id].sta.sta.addr); - if (!sta) { - IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n"); - rcu_read_unlock(); - return; - } - - psta = (void *) sta->drv_priv; - rs_sta = &psta->rs_sta; - - spin_lock_irqsave(&rs_sta->lock, flags); - - rs_sta->tgg = 0; - switch (priv->band) { - case IEEE80211_BAND_2GHZ: - /* TODO: this always does G, not a regression */ - if (priv->contexts[IWL_RXON_CTX_BSS].active.flags & - RXON_FLG_TGG_PROTECT_MSK) { - rs_sta->tgg = 1; - rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; - } else - rs_sta->expected_tpt = iwl3945_expected_tpt_g; - break; - - case IEEE80211_BAND_5GHZ: - rs_sta->expected_tpt = iwl3945_expected_tpt_a; - break; - case IEEE80211_NUM_BANDS: - BUG(); - break; - } - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - rssi = priv->_3945.last_rx_rssi; - if (rssi == 0) - rssi = IWL_MIN_RSSI_VAL; - - IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi); - - rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band); - - IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: " - "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, - iwl3945_rates[rs_sta->start_rate].plcp); - rcu_read_unlock(); -} - -int iwl3945_rate_control_register(void) -{ - return ieee80211_rate_control_register(&rs_ops); -} - -void iwl3945_rate_control_unregister(void) -{ - ieee80211_rate_control_unregister(&rs_ops); -} diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c deleted file mode 100644 index f7c0a743847..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945.c +++ /dev/null @@ -1,2741 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. 
- * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/firmware.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> -#include <net/mac80211.h> - -#include "iwl-fh.h" -#include "iwl-3945-fh.h" -#include "iwl-commands.h" -#include "iwl-sta.h" -#include "iwl-3945.h" -#include "iwl-eeprom.h" -#include "iwl-core.h" -#include "iwl-helpers.h" -#include "iwl-led.h" -#include "iwl-3945-led.h" -#include "iwl-3945-debugfs.h" - -#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ - [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ - IWL_RATE_##r##M_IEEE, \ - IWL_RATE_##ip##M_INDEX, \ - IWL_RATE_##in##M_INDEX, \ - IWL_RATE_##rp##M_INDEX, \ - IWL_RATE_##rn##M_INDEX, \ - IWL_RATE_##pp##M_INDEX, \ - IWL_RATE_##np##M_INDEX, \ - IWL_RATE_##r##M_INDEX_TABLE, \ - IWL_RATE_##ip##M_INDEX_TABLE } - -/* - * Parameter order: - * rate, prev rate, next rate, prev tgg rate, next tgg rate - * - * If there isn't a valid next or previous rate then INV is used which - * maps to IWL_RATE_INVALID - * - */ -const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = { - IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ - IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ - IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ - IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ - IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ - IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ - IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ - IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ - IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ - IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ - IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ - IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ -}; - -static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index) -{ - u8 rate = iwl3945_rates[rate_index].prev_ieee; - - if (rate == IWL_RATE_INVALID) - rate = rate_index; - return rate; -} - -/* 1 = enable the iwl3945_disable_events() function */ -#define IWL_EVT_DISABLE (0) -#define IWL_EVT_DISABLE_SIZE (1532/32) - -/** - * iwl3945_disable_events - Disable selected events in uCode event log - * - * Disable an event by writing "1"s into "disable" - * bitmap in SRAM. Bit position corresponds to Event # (id/type). - * Default values of 0 enable uCode events to be logged. - * Use for only special debugging. This function is just a placeholder as-is, - * you'll need to provide the special bits! ... - * ... and set IWL_EVT_DISABLE to 1. 
*/ -void iwl3945_disable_events(struct iwl_priv *priv) -{ - int i; - u32 base; /* SRAM address of event log header */ - u32 disable_ptr; /* SRAM address of event-disable bitmap array */ - u32 array_size; /* # of u32 entries in array */ - static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { - 0x00000000, /* 31 - 0 Event id numbers */ - 0x00000000, /* 63 - 32 */ - 0x00000000, /* 95 - 64 */ - 0x00000000, /* 127 - 96 */ - 0x00000000, /* 159 - 128 */ - 0x00000000, /* 191 - 160 */ - 0x00000000, /* 223 - 192 */ - 0x00000000, /* 255 - 224 */ - 0x00000000, /* 287 - 256 */ - 0x00000000, /* 319 - 288 */ - 0x00000000, /* 351 - 320 */ - 0x00000000, /* 383 - 352 */ - 0x00000000, /* 415 - 384 */ - 0x00000000, /* 447 - 416 */ - 0x00000000, /* 479 - 448 */ - 0x00000000, /* 511 - 480 */ - 0x00000000, /* 543 - 512 */ - 0x00000000, /* 575 - 544 */ - 0x00000000, /* 607 - 576 */ - 0x00000000, /* 639 - 608 */ - 0x00000000, /* 671 - 640 */ - 0x00000000, /* 703 - 672 */ - 0x00000000, /* 735 - 704 */ - 0x00000000, /* 767 - 736 */ - 0x00000000, /* 799 - 768 */ - 0x00000000, /* 831 - 800 */ - 0x00000000, /* 863 - 832 */ - 0x00000000, /* 895 - 864 */ - 0x00000000, /* 927 - 896 */ - 0x00000000, /* 959 - 928 */ - 0x00000000, /* 991 - 960 */ - 0x00000000, /* 1023 - 992 */ - 0x00000000, /* 1055 - 1024 */ - 0x00000000, /* 1087 - 1056 */ - 0x00000000, /* 1119 - 1088 */ - 0x00000000, /* 1151 - 1120 */ - 0x00000000, /* 1183 - 1152 */ - 0x00000000, /* 1215 - 1184 */ - 0x00000000, /* 1247 - 1216 */ - 0x00000000, /* 1279 - 1248 */ - 0x00000000, /* 1311 - 1280 */ - 0x00000000, /* 1343 - 1312 */ - 0x00000000, /* 1375 - 1344 */ - 0x00000000, /* 1407 - 1376 */ - 0x00000000, /* 1439 - 1408 */ - 0x00000000, /* 1471 - 1440 */ - 0x00000000, /* 1503 - 1472 */ - }; - - base = le32_to_cpu(priv->card_alive.log_event_table_ptr); - if (!iwl3945_hw_valid_rtc_data_addr(base)) { - IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); - return; - } - - disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32))); - array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32))); - - if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { - IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", - disable_ptr); - for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) - iwl_legacy_write_targ_mem(priv, - disable_ptr + (i * sizeof(u32)), - evt_disable[i]); - - } else { - IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n"); - IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n"); - IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n", - disable_ptr, array_size); - } - -} - -static int iwl3945_hwrate_to_plcp_idx(u8 plcp) -{ - int idx; - - for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++) - if (iwl3945_rates[idx].plcp == plcp) - return idx; - return -1; -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x - -static const char *iwl3945_get_tx_fail_reason(u32 status) -{ - switch (status & TX_STATUS_MSK) { - case TX_3945_STATUS_SUCCESS: - return "SUCCESS"; - TX_STATUS_ENTRY(SHORT_LIMIT); - TX_STATUS_ENTRY(LONG_LIMIT); - TX_STATUS_ENTRY(FIFO_UNDERRUN); - TX_STATUS_ENTRY(MGMNT_ABORT); - TX_STATUS_ENTRY(NEXT_FRAG); - TX_STATUS_ENTRY(LIFE_EXPIRE); - TX_STATUS_ENTRY(DEST_PS); - TX_STATUS_ENTRY(ABORTED); - TX_STATUS_ENTRY(BT_RETRY); - TX_STATUS_ENTRY(STA_INVALID); - TX_STATUS_ENTRY(FRAG_DROPPED); - TX_STATUS_ENTRY(TID_DISABLE); - TX_STATUS_ENTRY(FRAME_FLUSHED); - TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); - TX_STATUS_ENTRY(TX_LOCKED); - 
TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); - } - - return "UNKNOWN"; -} -#else -static inline const char *iwl3945_get_tx_fail_reason(u32 status) -{ - return ""; -} -#endif - -/* - * get ieee prev rate from rate scale table. - * for A and B mode we need to overright prev - * value - */ -int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate) -{ - int next_rate = iwl3945_get_prev_ieee_rate(rate); - - switch (priv->band) { - case IEEE80211_BAND_5GHZ: - if (rate == IWL_RATE_12M_INDEX) - next_rate = IWL_RATE_9M_INDEX; - else if (rate == IWL_RATE_6M_INDEX) - next_rate = IWL_RATE_6M_INDEX; - break; - case IEEE80211_BAND_2GHZ: - if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && - iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { - if (rate == IWL_RATE_11M_INDEX) - next_rate = IWL_RATE_5M_INDEX; - } - break; - - default: - break; - } - - return next_rate; -} - - -/** - * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd - * - * When FW advances 'R' index, all entries between old and new 'R' index - * need to be reclaimed. As result, some free space forms. If there is - * enough free space (> low mark), wake the stack that feeds us. - */ -static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, - int txq_id, int index) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct iwl_queue *q = &txq->q; - struct iwl_tx_info *tx_info; - - BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); - - for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); - q->read_ptr != index; - q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { - - tx_info = &txq->txb[txq->q.read_ptr]; - ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); - tx_info->skb = NULL; - priv->cfg->ops->lib->txq_free_tfd(priv, txq); - } - - if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) && - (txq_id != IWL39_CMD_QUEUE_NUM) && - priv->mac80211_registered) - iwl_legacy_wake_queue(priv, txq); -} - -/** - * iwl3945_rx_reply_tx - Handle Tx response - */ -static void iwl3945_rx_reply_tx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int index = SEQ_TO_INDEX(sequence); - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct ieee80211_tx_info *info; - struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; - u32 status = le32_to_cpu(tx_resp->status); - int rate_idx; - int fail; - - if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " - "is out of range [0-%d] %d %d\n", txq_id, - index, txq->q.n_bd, txq->q.write_ptr, - txq->q.read_ptr); - return; - } - - txq->time_stamp = jiffies; - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); - ieee80211_tx_info_clear_status(info); - - /* Fill the MRR chain with some info about on-chip retransmissions */ - rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); - if (info->band == IEEE80211_BAND_5GHZ) - rate_idx -= IWL_FIRST_OFDM_RATE; - - fail = tx_resp->failure_frame; - - info->status.rates[0].idx = rate_idx; - info->status.rates[0].count = fail + 1; /* add final attempt */ - - /* tx_status->rts_retry_count = tx_resp->failure_rts; */ - info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 
- IEEE80211_TX_STAT_ACK : 0; - - IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", - txq_id, iwl3945_get_tx_fail_reason(status), status, - tx_resp->rate, tx_resp->failure_frame); - - IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index); - iwl3945_tx_queue_reclaim(priv, txq_id, index); - - if (status & TX_ABORT_REQUIRED_MSK) - IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); -} - - - -/***************************************************************************** - * - * Intel PRO/Wireless 3945ABG/BG Network Connection - * - * RX handler implementations - * - *****************************************************************************/ -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS -static void iwl3945_accumulative_statistics(struct iwl_priv *priv, - __le32 *stats) -{ - int i; - __le32 *prev_stats; - u32 *accum_stats; - u32 *delta, *max_delta; - - prev_stats = (__le32 *)&priv->_3945.statistics; - accum_stats = (u32 *)&priv->_3945.accum_statistics; - delta = (u32 *)&priv->_3945.delta_statistics; - max_delta = (u32 *)&priv->_3945.max_delta; - - for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics); - i += sizeof(__le32), stats++, prev_stats++, delta++, - max_delta++, accum_stats++) { - if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { - *delta = (le32_to_cpu(*stats) - - le32_to_cpu(*prev_stats)); - *accum_stats += *delta; - if (*delta > *max_delta) - *max_delta = *delta; - } - } - - /* reset accumulative statistics for "no-counter" type statistics */ - priv->_3945.accum_statistics.general.temperature = - priv->_3945.statistics.general.temperature; - priv->_3945.accum_statistics.general.ttl_timestamp = - priv->_3945.statistics.general.ttl_timestamp; -} -#endif - -void iwl3945_hw_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", - (int)sizeof(struct iwl3945_notif_statistics), - le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); -#endif - - memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); -} - -void iwl3945_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - __le32 *flag = (__le32 *)&pkt->u.raw; - - if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - memset(&priv->_3945.accum_statistics, 0, - sizeof(struct iwl3945_notif_statistics)); - memset(&priv->_3945.delta_statistics, 0, - sizeof(struct iwl3945_notif_statistics)); - memset(&priv->_3945.max_delta, 0, - sizeof(struct iwl3945_notif_statistics)); -#endif - IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); - } - iwl3945_hw_rx_statistics(priv, rxb); -} - - -/****************************************************************************** - * - * Misc. internal state and helper functions - * - ******************************************************************************/ - -/* This is necessary only for a number of statistics, see the caller. */ -static int iwl3945_is_network_packet(struct iwl_priv *priv, - struct ieee80211_hdr *header) -{ - /* Filter incoming packets to determine if they are targeted toward - * this network, discarding packets coming from ourselves */ - switch (priv->iw_mode) { - case NL80211_IFTYPE_ADHOC: /* Header: Dest. 
| Source | BSSID */ - /* packets to our IBSS update information */ - return !compare_ether_addr(header->addr3, priv->bssid); - case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */ - /* packets to our IBSS update information */ - return !compare_ether_addr(header->addr2, priv->bssid); - default: - return 1; - } -} - -static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb, - struct ieee80211_rx_status *stats) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); - struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); - struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); - u16 len = le16_to_cpu(rx_hdr->len); - struct sk_buff *skb; - __le16 fc = hdr->frame_control; - - /* We received data from the HW, so stop the watchdog */ - if (unlikely(len + IWL39_RX_FRAME_SIZE > - PAGE_SIZE << priv->hw_params.rx_page_order)) { - IWL_DEBUG_DROP(priv, "Corruption detected!\n"); - return; - } - - /* We only process data packets if the interface is open */ - if (unlikely(!priv->is_open)) { - IWL_DEBUG_DROP_LIMIT(priv, - "Dropping packet while interface is not open.\n"); - return; - } - - skb = dev_alloc_skb(128); - if (!skb) { - IWL_ERR(priv, "dev_alloc_skb failed\n"); - return; - } - - if (!iwl3945_mod_params.sw_crypto) - iwl_legacy_set_decrypted_flag(priv, - (struct ieee80211_hdr *)rxb_addr(rxb), - le32_to_cpu(rx_end->status), stats); - - skb_add_rx_frag(skb, 0, rxb->page, - (void *)rx_hdr->payload - (void *)pkt, len); - - iwl_legacy_update_stats(priv, false, fc, len); - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - - ieee80211_rx(priv->hw, skb); - priv->alloc_rxb_page--; - rxb->page = NULL; -} - -#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) - -static void iwl3945_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct ieee80211_hdr *header; - struct ieee80211_rx_status rx_status; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); - struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); - struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); - u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); - u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff); - u8 network_packet; - - rx_status.flag = 0; - rx_status.mactime = le64_to_cpu(rx_end->timestamp); - rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), - rx_status.band); - - rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); - if (rx_status.band == IEEE80211_BAND_5GHZ) - rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; - - rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & - RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; - - /* set the preamble flag if appropriate */ - if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; - - if ((unlikely(rx_stats->phy_count > 20))) { - IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", - rx_stats->phy_count); - return; - } - - if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) - || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { - IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status); - return; - } - - - - /* Convert 3945's rssi indicator to dBm */ - rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; - - IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n", - rx_status.signal, rx_stats_sig_avg, - rx_stats_noise_diff); - - header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); - - network_packet = iwl3945_is_network_packet(priv, header); - - IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n", - network_packet ? '*' : ' ', - le16_to_cpu(rx_hdr->channel), - rx_status.signal, rx_status.signal, - rx_status.rate_idx); - - iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), - header); - - if (network_packet) { - priv->_3945.last_beacon_time = - le32_to_cpu(rx_end->beacon_timestamp); - priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); - priv->_3945.last_rx_rssi = rx_status.signal; - } - - iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); -} - -int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, u8 pad) -{ - int count; - struct iwl_queue *q; - struct iwl3945_tfd *tfd, *tfd_tmp; - - q = &txq->q; - tfd_tmp = (struct iwl3945_tfd *)txq->tfds; - tfd = &tfd_tmp[q->write_ptr]; - - if (reset) - memset(tfd, 0, sizeof(*tfd)); - - count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); - - if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { - IWL_ERR(priv, "Error can not send more than %d chunks\n", - NUM_TFD_CHUNKS); - return -EINVAL; - } - - tfd->tbs[count].addr = cpu_to_le32(addr); - tfd->tbs[count].len = cpu_to_le32(len); - - count++; - - tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | - TFD_CTL_PAD_SET(pad)); - - return 0; -} - -/** - * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr] - * - * Does NOT advance any indexes - */ -void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) -{ - struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds; - int index = txq->q.read_ptr; - struct iwl3945_tfd *tfd = &tfd_tmp[index]; - struct pci_dev *dev = priv->pci_dev; - int i; - int counter; - - /* sanity check */ - counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); - if (counter > NUM_TFD_CHUNKS) { - IWL_ERR(priv, "Too many chunks: %i\n", counter); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* Unmap tx_cmd */ - if (counter) - pci_unmap_single(dev, - dma_unmap_addr(&txq->meta[index], mapping), - dma_unmap_len(&txq->meta[index], len), - PCI_DMA_TODEVICE); - - /* unmap chunks if any */ - - for (i = 1; i < counter; i++) - pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), - le32_to_cpu(tfd->tbs[i].len), 
PCI_DMA_TODEVICE); - - /* free SKB */ - if (txq->txb) { - struct sk_buff *skb; - - skb = txq->txb[txq->q.read_ptr].skb; - - /* can be called from irqs-disabled context */ - if (skb) { - dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb = NULL; - } - } -} - -/** - * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: - * -*/ -void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, - int sta_id, int tx_id) -{ - u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value; - u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945); - u16 rate_mask; - int rate; - u8 rts_retry_limit; - u8 data_retry_limit; - __le32 tx_flags; - __le16 fc = hdr->frame_control; - struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; - - rate = iwl3945_rates[rate_index].plcp; - tx_flags = tx_cmd->tx_flags; - - /* We need to figure out how to get the sta->supp_rates while - * in this running context */ - rate_mask = IWL_RATES_MASK_3945; - - /* Set retry limit on DATA packets and Probe Responses*/ - if (ieee80211_is_probe_resp(fc)) - data_retry_limit = 3; - else - data_retry_limit = IWL_DEFAULT_TX_RETRY; - tx_cmd->data_retry_limit = data_retry_limit; - - if (tx_id >= IWL39_CMD_QUEUE_NUM) - rts_retry_limit = 3; - else - rts_retry_limit = 7; - - if (data_retry_limit < rts_retry_limit) - rts_retry_limit = data_retry_limit; - tx_cmd->rts_retry_limit = rts_retry_limit; - - tx_cmd->rate = rate; - tx_cmd->tx_flags = tx_flags; - - /* OFDM */ - tx_cmd->supp_rates[0] = - ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; - - /* CCK */ - tx_cmd->supp_rates[1] = (rate_mask & 0xF); - - IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " - "cck/ofdm mask: 0x%x/0x%x\n", sta_id, - tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags), - tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); -} - -static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate) -{ - unsigned long flags_spin; - struct iwl_station_entry *station; - - if (sta_id == IWL_INVALID_STATION) - return IWL_INVALID_STATION; - - spin_lock_irqsave(&priv->sta_lock, flags_spin); - station = &priv->stations[sta_id]; - - station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; - station->sta.rate_n_flags = cpu_to_le16(tx_rate); - station->sta.mode = STA_CONTROL_MODIFY_MSK; - iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC); - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - - IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", - sta_id, tx_rate); - return sta_id; -} - -static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) -{ -/* - * (for documentation purposes) - * to set power to V_AUX, do - - if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { - iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - - iwl_poll_bit(priv, CSR_GPIO_IN, - CSR_GPIO_IN_VAL_VAUX_PWR_SRC, - CSR_GPIO_IN_BIT_AUX_POWER, 5000); - } - */ - - iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); - - iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, - CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ -} - -static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); - iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), - rxq->rb_stts_dma); - iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0); - 
iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), - FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | - FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | - FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | - FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | - (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | - FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | - (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | - FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); - - /* fake read to flush all prev I/O */ - iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL); - - return 0; -} - -static int iwl3945_tx_reset(struct iwl_priv *priv) -{ - - /* bypass mode */ - iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2); - - /* RA 0 is active */ - iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); - - /* all 6 fifo are active */ - iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); - - iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); - iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); - iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); - iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); - - iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE, - priv->_3945.shared_phys); - - iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG, - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); - - - return 0; -} - -/** - * iwl3945_txq_ctx_reset - Reset TX queue context - * - * Destroys all DMA structures and initialize them again - */ -static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) -{ - int rc; - int txq_id, slots_num; - - iwl3945_hw_txq_ctx_free(priv); - - /* allocate tx queue structure */ - rc = iwl_legacy_alloc_txq_mem(priv); - if (rc) - return rc; - - /* Tx CMD queue */ - rc = iwl3945_tx_reset(priv); - if (rc) - goto error; - - /* Tx queue(s) */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id], - slots_num, txq_id); - if (rc) { - IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); - goto error; - } - } - - return rc; - - error: - iwl3945_hw_txq_ctx_free(priv); - return rc; -} - - -/* - * Start up 3945's basic functionality after it has been reset - * (e.g. 
after platform boot, or shutdown via iwl_legacy_apm_stop()) - * NOTE: This does not load uCode nor start the embedded processor - */ -static int iwl3945_apm_init(struct iwl_priv *priv) -{ - int ret = iwl_legacy_apm_init(priv); - - /* Clear APMG (NIC's internal power management) interrupts */ - iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); - iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); - - /* Reset radio chip */ - iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - udelay(5); - iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - - return ret; -} - -static void iwl3945_nic_config(struct iwl_priv *priv) -{ - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - unsigned long flags; - u8 rev_id = priv->pci_dev->revision; - - spin_lock_irqsave(&priv->lock, flags); - - /* Determine HW type */ - IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); - - if (rev_id & PCI_CFG_REV_ID_BIT_RTP) - IWL_DEBUG_INFO(priv, "RTP type\n"); - else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { - IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); - } else { - IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); - } - - if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { - IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); - } else - IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); - - if ((eeprom->board_revision & 0xF0) == 0xD0) { - IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", - eeprom->board_revision); - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); - } else { - IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", - eeprom->board_revision); - iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); - } - - if (eeprom->almgor_m_version <= 1) { - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); - IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", - eeprom->almgor_m_version); - } else { - IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", - eeprom->almgor_m_version); - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); - } - spin_unlock_irqrestore(&priv->lock, flags); - - if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) - IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n"); - - if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) - IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n"); -} - -int iwl3945_hw_nic_init(struct iwl_priv *priv) -{ - int rc; - unsigned long flags; - struct iwl_rx_queue *rxq = &priv->rxq; - - spin_lock_irqsave(&priv->lock, flags); - priv->cfg->ops->lib->apm_ops.init(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - iwl3945_set_pwr_vmain(priv); - - priv->cfg->ops->lib->apm_ops.config(priv); - - /* Allocate the RX queue, or reset if it is already allocated */ - if (!rxq->bd) { - rc = iwl_legacy_rx_queue_alloc(priv); - if (rc) { - IWL_ERR(priv, "Unable to initialize Rx queue\n"); - return -ENOMEM; - } - } else - iwl3945_rx_queue_reset(priv, rxq); - - iwl3945_rx_replenish(priv); - - iwl3945_rx_init(priv, rxq); - - - /* Look at using this instead: - rxq->need_update = 1; - iwl_legacy_rx_queue_update_write_ptr(priv, rxq); - */ - - 
iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); - - rc = iwl3945_txq_ctx_reset(priv); - if (rc) - return rc; - - set_bit(STATUS_INIT, &priv->status); - - return 0; -} - -/** - * iwl3945_hw_txq_ctx_free - Free TXQ Context - * - * Destroy all TX DMA queues and structures - */ -void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) -{ - int txq_id; - - /* Tx queues */ - if (priv->txq) - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; - txq_id++) - if (txq_id == IWL39_CMD_QUEUE_NUM) - iwl_legacy_cmd_queue_free(priv); - else - iwl_legacy_tx_queue_free(priv, txq_id); - - /* free tx queue structure */ - iwl_legacy_txq_mem(priv); -} - -void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) -{ - int txq_id; - - /* stop SCD */ - iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0); - iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0); - - /* reset TFD queues */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); - iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, - FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), - 1000); - } - - iwl3945_hw_txq_ctx_free(priv); -} - -/** - * iwl3945_hw_reg_adjust_power_by_temp - * return index delta into power gain settings table -*/ -static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) -{ - return (new_reading - old_reading) * (-11) / 100; -} - -/** - * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range - */ -static inline int iwl3945_hw_reg_temp_out_of_range(int temperature) -{ - return ((temperature < -260) || (temperature > 25)) ? 1 : 0; -} - -int iwl3945_hw_get_temperature(struct iwl_priv *priv) -{ - return iwl_read32(priv, CSR_UCODE_DRV_GP2); -} - -/** - * iwl3945_hw_reg_txpower_get_temperature - * get the current temperature by reading from NIC -*/ -static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv) -{ - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - int temperature; - - temperature = iwl3945_hw_get_temperature(priv); - - /* driver's okay range is -260 to +25. - * human readable okay range is 0 to +285 */ - IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT); - - /* handle insane temp reading */ - if (iwl3945_hw_reg_temp_out_of_range(temperature)) { - IWL_ERR(priv, "Error bad temperature value %d\n", temperature); - - /* if really really hot(?), - * substitute the 3rd band/group's temp measured at factory */ - if (priv->last_temperature > 100) - temperature = eeprom->groups[2].temperature; - else /* else use most recent "sane" value from driver */ - temperature = priv->last_temperature; - } - - return temperature; /* raw, not "human readable" */ -} - -/* Adjust Txpower only if temperature variance is greater than threshold. - * - * Both are lower than older versions' 9 degrees */ -#define IWL_TEMPERATURE_LIMIT_TIMER 6 - -/** - * iwl3945_is_temp_calib_needed - determines if new calibration is needed - * - * records new temperature in tx_mgr->temperature. - * replaces tx_mgr->last_temperature *only* if calib needed - * (assumes caller will actually do the calibration!). 
*/ -static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv) -{ - int temp_diff; - - priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv); - temp_diff = priv->temperature - priv->last_temperature; - - /* get absolute value */ - if (temp_diff < 0) { - IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff); - temp_diff = -temp_diff; - } else if (temp_diff == 0) - IWL_DEBUG_POWER(priv, "Same temp,\n"); - else - IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff); - - /* if we don't need calibration, *don't* update last_temperature */ - if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { - IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n"); - return 0; - } - - IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n"); - - /* assume that caller will actually do calib ... - * update the "last temperature" value */ - priv->last_temperature = priv->temperature; - return 1; -} - -#define IWL_MAX_GAIN_ENTRIES 78 -#define IWL_CCK_FROM_OFDM_POWER_DIFF -5 -#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10) - -/* radio and DSP power table, each step is 1/2 dB. - * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ -static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { - { - {251, 127}, /* 2.4 GHz, highest power */ - {251, 127}, - {251, 127}, - {251, 127}, - {251, 125}, - {251, 110}, - {251, 105}, - {251, 98}, - {187, 125}, - {187, 115}, - {187, 108}, - {187, 99}, - {243, 119}, - {243, 111}, - {243, 105}, - {243, 97}, - {243, 92}, - {211, 106}, - {211, 100}, - {179, 120}, - {179, 113}, - {179, 107}, - {147, 125}, - {147, 119}, - {147, 112}, - {147, 106}, - {147, 101}, - {147, 97}, - {147, 91}, - {115, 107}, - {235, 121}, - {235, 115}, - {235, 109}, - {203, 127}, - {203, 121}, - {203, 115}, - {203, 108}, - {203, 102}, - {203, 96}, - {203, 92}, - {171, 110}, - {171, 104}, - {171, 98}, - {139, 116}, - {227, 125}, - {227, 119}, - {227, 113}, - {227, 107}, - {227, 101}, - {227, 96}, - {195, 113}, - {195, 106}, - {195, 102}, - {195, 95}, - {163, 113}, - {163, 106}, - {163, 102}, - {163, 95}, - {131, 113}, - {131, 106}, - {131, 102}, - {131, 95}, - {99, 113}, - {99, 106}, - {99, 102}, - {99, 95}, - {67, 113}, - {67, 106}, - {67, 102}, - {67, 95}, - {35, 113}, - {35, 106}, - {35, 102}, - {35, 95}, - {3, 113}, - {3, 106}, - {3, 102}, - {3, 95} }, /* 2.4 GHz, lowest power */ - { - {251, 127}, /* 5.x GHz, highest power */ - {251, 120}, - {251, 114}, - {219, 119}, - {219, 101}, - {187, 113}, - {187, 102}, - {155, 114}, - {155, 103}, - {123, 117}, - {123, 107}, - {123, 99}, - {123, 92}, - {91, 108}, - {59, 125}, - {59, 118}, - {59, 109}, - {59, 102}, - {59, 96}, - {59, 90}, - {27, 104}, - {27, 98}, - {27, 92}, - {115, 118}, - {115, 111}, - {115, 104}, - {83, 126}, - {83, 121}, - {83, 113}, - {83, 105}, - {83, 99}, - {51, 118}, - {51, 111}, - {51, 104}, - {51, 98}, - {19, 116}, - {19, 109}, - {19, 102}, - {19, 98}, - {19, 93}, - {171, 113}, - {171, 107}, - {171, 99}, - {139, 120}, - {139, 113}, - {139, 107}, - {139, 99}, - {107, 120}, - {107, 113}, - {107, 107}, - {107, 99}, - {75, 120}, - {75, 113}, - {75, 107}, - {75, 99}, - {43, 120}, - {43, 113}, - {43, 107}, - {43, 99}, - {11, 120}, - {11, 113}, - {11, 107}, - {11, 99}, - {131, 107}, - {131, 99}, - {99, 120}, - {99, 113}, - {99, 107}, - {99, 99}, - {67, 120}, - {67, 113}, - {67, 107}, - {67, 99}, - {35, 120}, - {35, 113}, - {35, 107}, - {35, 99}, - {3, 120} } /* 5.x GHz, lowest power */ -}; - -static inline u8 iwl3945_hw_reg_fix_power_index(int index) -{ - if (index < 0) - return 0; - if 
(index >= IWL_MAX_GAIN_ENTRIES) - return IWL_MAX_GAIN_ENTRIES - 1; - return (u8) index; -} - -/* Kick off thermal recalibration check every 60 seconds */ -#define REG_RECALIB_PERIOD (60) - -/** - * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests - * - * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) - * or 6 Mbit (OFDM) rates. - */ -static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index, - s32 rate_index, const s8 *clip_pwrs, - struct iwl_channel_info *ch_info, - int band_index) -{ - struct iwl3945_scan_power_info *scan_power_info; - s8 power; - u8 power_index; - - scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index]; - - /* use this channel group's 6Mbit clipping/saturation pwr, - * but cap at regulatory scan power restriction (set during init - * based on eeprom channel data) for this channel. */ - power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); - - power = min(power, priv->tx_power_user_lmt); - scan_power_info->requested_power = power; - - /* find difference between new scan *power* and current "normal" - * Tx *power* for 6Mb. Use this difference (x2) to adjust the - * current "normal" temperature-compensated Tx power *index* for - * this rate (1Mb or 6Mb) to yield new temp-compensated scan power - * *index*. */ - power_index = ch_info->power_info[rate_index].power_table_index - - (power - ch_info->power_info - [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2; - - /* store reference index that we use when adjusting *all* scan - * powers. So we can accommodate user (all channel) or spectrum - * management (single channel) power changes "between" temperature - * feedback compensation procedures. - * don't force fit this reference index into gain table; it may be a - * negative number. This will help avoid errors when we're at - * the lower bounds (highest gains, for warmest temperatures) - * of the table. */ - - /* don't exceed table bounds for "real" setting */ - power_index = iwl3945_hw_reg_fix_power_index(power_index); - - scan_power_info->power_table_index = power_index; - scan_power_info->tpc.tx_gain = - power_gain_table[band_index][power_index].tx_gain; - scan_power_info->tpc.dsp_atten = - power_gain_table[band_index][power_index].dsp_atten; -} - -/** - * iwl3945_send_tx_power - fill in Tx Power command with gain settings - * - * Configures power settings for all rates for the current channel, - * using values from channel info struct, and send to NIC - */ -static int iwl3945_send_tx_power(struct iwl_priv *priv) -{ - int rate_idx, i; - const struct iwl_channel_info *ch_info = NULL; - struct iwl3945_txpowertable_cmd txpower = { - .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel, - }; - u16 chan; - - if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), - "TX Power requested while scanning!\n")) - return -EAGAIN; - - chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); - - txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 
0 : 1; - ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan); - if (!ch_info) { - IWL_ERR(priv, - "Failed to get channel info for channel %d [%d]\n", - chan, priv->band); - return -EINVAL; - } - - if (!iwl_legacy_is_channel_valid(ch_info)) { - IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " - "non-Tx channel.\n"); - return 0; - } - - /* fill cmd with power settings for all rates for current channel */ - /* Fill OFDM rate */ - for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0; - rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) { - - txpower.power[i].tpc = ch_info->power_info[i].tpc; - txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; - - IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", - le16_to_cpu(txpower.channel), - txpower.band, - txpower.power[i].tpc.tx_gain, - txpower.power[i].tpc.dsp_atten, - txpower.power[i].rate); - } - /* Fill CCK rates */ - for (rate_idx = IWL_FIRST_CCK_RATE; - rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) { - txpower.power[i].tpc = ch_info->power_info[i].tpc; - txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; - - IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", - le16_to_cpu(txpower.channel), - txpower.band, - txpower.power[i].tpc.tx_gain, - txpower.power[i].tpc.dsp_atten, - txpower.power[i].rate); - } - - return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, - sizeof(struct iwl3945_txpowertable_cmd), - &txpower); - -} - -/** - * iwl3945_hw_reg_set_new_power - Configures power tables at new levels - * @ch_info: Channel to update. Uses power_info.requested_power. - * - * Replace requested_power and base_power_index ch_info fields for - * one channel. - * - * Called if user or spectrum management changes power preferences. - * Takes into account h/w and modulation limitations (clip power). - * - * This does *not* send anything to NIC, just sets up ch_info for one channel. - * - * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to - * properly fill out the scan powers, and actual h/w gain settings, - * and send changes to NIC - */ -static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv, - struct iwl_channel_info *ch_info) -{ - struct iwl3945_channel_power_info *power_info; - int power_changed = 0; - int i; - const s8 *clip_pwrs; - int power; - - /* Get this chnlgrp's rate-to-max/clip-powers table */ - clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; - - /* Get this channel's rate-to-current-power settings table */ - power_info = ch_info->power_info; - - /* update OFDM Txpower settings */ - for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; - i++, ++power_info) { - int delta_idx; - - /* limit new power to be no more than h/w capability */ - power = min(ch_info->curr_txpow, clip_pwrs[i]); - if (power == power_info->requested_power) - continue; - - /* find difference between old and new requested powers, - * update base (non-temp-compensated) power index */ - delta_idx = (power - power_info->requested_power) * 2; - power_info->base_power_index -= delta_idx; - - /* save new requested power value */ - power_info->requested_power = power; - - power_changed = 1; - } - - /* update CCK Txpower settings, based on OFDM 12M setting ... - * ... all CCK power settings for a given channel are the *same*. */ - if (power_changed) { - power = - ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. 
- requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF; - - /* do all CCK rates' iwl3945_channel_power_info structures */ - for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) { - power_info->requested_power = power; - power_info->base_power_index = - ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. - base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF; - ++power_info; - } - } - - return 0; -} - -/** - * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel - * - * NOTE: Returned power limit may be less (but not more) than requested, - * based strictly on regulatory (eeprom and spectrum mgt) limitations - * (no consideration for h/w clipping limitations). - */ -static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info) -{ - s8 max_power; - -#if 0 - /* if we're using TGd limits, use lower of TGd or EEPROM */ - if (ch_info->tgd_data.max_power != 0) - max_power = min(ch_info->tgd_data.max_power, - ch_info->eeprom.max_power_avg); - - /* else just use EEPROM limits */ - else -#endif - max_power = ch_info->eeprom.max_power_avg; - - return min(max_power, ch_info->max_power_avg); -} - -/** - * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature - * - * Compensate txpower settings of *all* channels for temperature. - * This only accounts for the difference between current temperature - * and the factory calibration temperatures, and bases the new settings - * on the channel's base_power_index. - * - * If RxOn is "associated", this sends the new Txpower to NIC! - */ -static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv) -{ - struct iwl_channel_info *ch_info = NULL; - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - int delta_index; - const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ - u8 a_band; - u8 rate_index; - u8 scan_tbl_index; - u8 i; - int ref_temp; - int temperature = priv->temperature; - - if (priv->disable_tx_power_cal || - test_bit(STATUS_SCANNING, &priv->status)) { - /* do not perform tx power calibration */ - return 0; - } - /* set up new Tx power info for each and every channel, 2.4 and 5.x */ - for (i = 0; i < priv->channel_count; i++) { - ch_info = &priv->channel_info[i]; - a_band = iwl_legacy_is_channel_a_band(ch_info); - - /* Get this chnlgrp's factory calibration temperature */ - ref_temp = (s16)eeprom->groups[ch_info->group_index]. - temperature; - - /* get power index adjustment based on current and factory - * temps */ - delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, - ref_temp); - - /* set tx power value for all rates, OFDM and CCK */ - for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945; - rate_index++) { - int power_idx = - ch_info->power_info[rate_index].base_power_index; - - /* temperature compensate */ - power_idx += delta_index; - - /* stay within table range */ - power_idx = iwl3945_hw_reg_fix_power_index(power_idx); - ch_info->power_info[rate_index]. - power_table_index = (u8) power_idx; - ch_info->power_info[rate_index].tpc = - power_gain_table[a_band][power_idx]; - } - - /* Get this chnlgrp's rate-to-max/clip-powers table */ - clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; - - /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ - for (scan_tbl_index = 0; - scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { - s32 actual_index = (scan_tbl_index == 0) ? 
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; - iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, - actual_index, clip_pwrs, - ch_info, a_band); - } - } - - /* send Txpower command for current channel to ucode */ - return priv->cfg->ops->lib->send_tx_power(priv); -} - -int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) -{ - struct iwl_channel_info *ch_info; - s8 max_power; - u8 a_band; - u8 i; - - if (priv->tx_power_user_lmt == power) { - IWL_DEBUG_POWER(priv, "Requested Tx power same as current " - "limit: %ddBm.\n", power); - return 0; - } - - IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power); - priv->tx_power_user_lmt = power; - - /* set up new Tx powers for each and every channel, 2.4 and 5.x */ - - for (i = 0; i < priv->channel_count; i++) { - ch_info = &priv->channel_info[i]; - a_band = iwl_legacy_is_channel_a_band(ch_info); - - /* find minimum power of all user and regulatory constraints - * (does not consider h/w clipping limitations) */ - max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info); - max_power = min(power, max_power); - if (max_power != ch_info->curr_txpow) { - ch_info->curr_txpow = max_power; - - /* this considers the h/w clipping limitations */ - iwl3945_hw_reg_set_new_power(priv, ch_info); - } - } - - /* update txpower settings for all channels, - * send to NIC if associated. */ - iwl3945_is_temp_calib_needed(priv); - iwl3945_hw_reg_comp_txpower_temp(priv); - - return 0; -} - -static int iwl3945_send_rxon_assoc(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int rc = 0; - struct iwl_rx_packet *pkt; - struct iwl3945_rxon_assoc_cmd rxon_assoc; - struct iwl_host_cmd cmd = { - .id = REPLY_RXON_ASSOC, - .len = sizeof(rxon_assoc), - .flags = CMD_WANT_SKB, - .data = &rxon_assoc, - }; - const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; - const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; - - if ((rxon1->flags == rxon2->flags) && - (rxon1->filter_flags == rxon2->filter_flags) && - (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && - (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { - IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); - return 0; - } - - rxon_assoc.flags = ctx->staging.flags; - rxon_assoc.filter_flags = ctx->staging.filter_flags; - rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; - rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; - rxon_assoc.reserved = 0; - - rc = iwl_legacy_send_cmd_sync(priv, &cmd); - if (rc) - return rc; - - pkt = (struct iwl_rx_packet *)cmd.reply_page; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); - rc = -EIO; - } - - iwl_legacy_free_pages(priv, cmd.reply_page); - - return rc; -} - -/** - * iwl3945_commit_rxon - commit staging_rxon to hardware - * - * The RXON command in staging_rxon is committed to the hardware and - * the active_rxon structure is updated with the new data. This - * function correctly transitions out of the RXON_ASSOC_MSK state if - * a HW tune is required based on the RXON structure changes. 
- */ -int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - /* cast away the const for active_rxon in this function */ - struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active; - struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging; - int rc = 0; - bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EINVAL; - - if (!iwl_legacy_is_alive(priv)) - return -1; - - /* always get timestamp with Rx frame */ - staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; - - /* select antenna */ - staging_rxon->flags &= - ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); - staging_rxon->flags |= iwl3945_get_antenna_flags(priv); - - rc = iwl_legacy_check_rxon_cmd(priv, ctx); - if (rc) { - IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); - return -EINVAL; - } - - /* If we don't need to send a full RXON, we can use - * iwl3945_rxon_assoc_cmd which is used to reconfigure filter - * and other flags for the current radio configuration. */ - if (!iwl_legacy_full_rxon_required(priv, - &priv->contexts[IWL_RXON_CTX_BSS])) { - rc = iwl_legacy_send_rxon_assoc(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - if (rc) { - IWL_ERR(priv, "Error setting RXON_ASSOC " - "configuration (%d).\n", rc); - return rc; - } - - memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); - /* - * We do not commit tx power settings while channel changing, - * do it now if tx power changed. - */ - iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); - return 0; - } - - /* If we are currently associated and the new config requires - * an RXON_ASSOC and the new config wants the associated mask enabled, - * we must clear the associated from the active configuration - * before we apply the new config */ - if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { - IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - - /* - * reserved4 and 5 could have been filled by the iwlcore code. - * Let's clear them before pushing to the 3945. - */ - active_rxon->reserved4 = 0; - active_rxon->reserved5 = 0; - rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, - sizeof(struct iwl3945_rxon_cmd), - &priv->contexts[IWL_RXON_CTX_BSS].active); - - /* If the mask clearing failed then we set - * active_rxon back to what it was previously */ - if (rc) { - active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; - IWL_ERR(priv, "Error clearing ASSOC_MSK on current " - "configuration (%d).\n", rc); - return rc; - } - iwl_legacy_clear_ucode_stations(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - iwl_legacy_restore_stations(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - } - - IWL_DEBUG_INFO(priv, "Sending RXON\n" - "* with%s RXON_FILTER_ASSOC_MSK\n" - "* channel = %d\n" - "* bssid = %pM\n", - (new_assoc ? "" : "out"), - le16_to_cpu(staging_rxon->channel), - staging_rxon->bssid_addr); - - /* - * reserved4 and 5 could have been filled by the iwlcore code. - * Let's clear them before pushing to the 3945. 
- */ - staging_rxon->reserved4 = 0; - staging_rxon->reserved5 = 0; - - iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); - - /* Apply the new configuration */ - rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, - sizeof(struct iwl3945_rxon_cmd), - staging_rxon); - if (rc) { - IWL_ERR(priv, "Error setting new configuration (%d).\n", rc); - return rc; - } - - memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); - - if (!new_assoc) { - iwl_legacy_clear_ucode_stations(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - iwl_legacy_restore_stations(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - } - - /* If we issue a new RXON command which required a tune then we must - * send a new TXPOWER command or we won't be able to Tx any frames */ - rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); - if (rc) { - IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); - return rc; - } - - /* Init the hardware's rate fallback order based on the band */ - rc = iwl3945_init_hw_rate_table(priv); - if (rc) { - IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc); - return -EIO; - } - - return 0; -} - -/** - * iwl3945_reg_txpower_periodic - called when time to check our temperature. - * - * -- reset periodic timer - * -- see if temp has changed enough to warrant re-calibration ... if so: - * -- correct coeffs for temp (can reset temp timer) - * -- save this temp as "last", - * -- send new set of gain settings to NIC - * NOTE: This should continue working, even when we're not associated, - * so we can keep our internal table of scan powers current. */ -void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) -{ - /* This will kick in the "brute force" - * iwl3945_hw_reg_comp_txpower_temp() below */ - if (!iwl3945_is_temp_calib_needed(priv)) - goto reschedule; - - /* Set up a new set of temp-adjusted TxPowers, send to NIC. - * This is based *only* on current temperature, - * ignoring any previous power measurements */ - iwl3945_hw_reg_comp_txpower_temp(priv); - - reschedule: - queue_delayed_work(priv->workqueue, - &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ); -} - -static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - _3945.thermal_periodic.work); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - mutex_lock(&priv->mutex); - iwl3945_reg_txpower_periodic(priv); - mutex_unlock(&priv->mutex); -} - -/** - * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4) - * for the channel. - * - * This function is used when initializing channel-info structs. - * - * NOTE: These channel groups do *NOT* match the bands above! - * These channel groups are based on factory-tested channels; - * on A-band, EEPROM's "group frequency" entries represent the top - * channel in each group 1-4. Group 5 All B/G channels are in group 0. - */ -static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv, - const struct iwl_channel_info *ch_info) -{ - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0]; - u8 group; - u16 group_index = 0; /* based on factory calib frequencies */ - u8 grp_channel; - - /* Find the group index for the channel ... don't use index 1(?) 
*/ - if (iwl_legacy_is_channel_a_band(ch_info)) { - for (group = 1; group < 5; group++) { - grp_channel = ch_grp[group].group_channel; - if (ch_info->channel <= grp_channel) { - group_index = group; - break; - } - } - /* group 4 has a few channels *above* its factory cal freq */ - if (group == 5) - group_index = 4; - } else - group_index = 0; /* 2.4 GHz, group 0 */ - - IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel, - group_index); - return group_index; -} - -/** - * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index - * - * Interpolate to get nominal (i.e. at factory calibration temperature) index - * into radio/DSP gain settings table for requested power. - */ -static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv, - s8 requested_power, - s32 setting_index, s32 *new_index) -{ - const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL; - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - s32 index0, index1; - s32 power = 2 * requested_power; - s32 i; - const struct iwl3945_eeprom_txpower_sample *samples; - s32 gains0, gains1; - s32 res; - s32 denominator; - - chnl_grp = &eeprom->groups[setting_index]; - samples = chnl_grp->samples; - for (i = 0; i < 5; i++) { - if (power == samples[i].power) { - *new_index = samples[i].gain_index; - return 0; - } - } - - if (power > samples[1].power) { - index0 = 0; - index1 = 1; - } else if (power > samples[2].power) { - index0 = 1; - index1 = 2; - } else if (power > samples[3].power) { - index0 = 2; - index1 = 3; - } else { - index0 = 3; - index1 = 4; - } - - denominator = (s32) samples[index1].power - (s32) samples[index0].power; - if (denominator == 0) - return -EINVAL; - gains0 = (s32) samples[index0].gain_index * (1 << 19); - gains1 = (s32) samples[index1].gain_index * (1 << 19); - res = gains0 + (gains1 - gains0) * - ((s32) power - (s32) samples[index0].power) / denominator + - (1 << 18); - *new_index = res >> 19; - return 0; -} - -static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv) -{ - u32 i; - s32 rate_index; - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - const struct iwl3945_eeprom_txpower_group *group; - - IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n"); - - for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { - s8 *clip_pwrs; /* table of power levels for each rate */ - s8 satur_pwr; /* saturation power for each chnl group */ - group = &eeprom->groups[i]; - - /* sanity check on factory saturation power value */ - if (group->saturation_power < 40) { - IWL_WARN(priv, "Error: saturation power is %d, " - "less than minimum expected 40\n", - group->saturation_power); - return; - } - - /* - * Derive requested power levels for each rate, based on - * hardware capabilities (saturation power for band). - * Basic value is 3dB down from saturation, with further - * power reductions for highest 3 data rates. These - * backoffs provide headroom for high rate modulation - * power peaks, without too much distortion (clipping). 
- */ - /* we'll fill in this array with h/w max power levels */ - clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers; - - /* divide factory saturation power by 2 to find -3dB level */ - satur_pwr = (s8) (group->saturation_power >> 1); - - /* fill in channel group's nominal powers for each rate */ - for (rate_index = 0; - rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) { - switch (rate_index) { - case IWL_RATE_36M_INDEX_TABLE: - if (i == 0) /* B/G */ - *clip_pwrs = satur_pwr; - else /* A */ - *clip_pwrs = satur_pwr - 5; - break; - case IWL_RATE_48M_INDEX_TABLE: - if (i == 0) - *clip_pwrs = satur_pwr - 7; - else - *clip_pwrs = satur_pwr - 10; - break; - case IWL_RATE_54M_INDEX_TABLE: - if (i == 0) - *clip_pwrs = satur_pwr - 9; - else - *clip_pwrs = satur_pwr - 12; - break; - default: - *clip_pwrs = satur_pwr; - break; - } - } - } -} - -/** - * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM - * - * Second pass (during init) to set up priv->channel_info - * - * Set up Tx-power settings in our channel info database for each VALID - * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values - * and current temperature. - * - * Since this is based on current temperature (at init time), these values may - * not be valid for very long, but it gives us a starting/default point, - * and allows us to active (i.e. using Tx) scan. - * - * This does *not* write values to NIC, just sets up our internal table. - */ -int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) -{ - struct iwl_channel_info *ch_info = NULL; - struct iwl3945_channel_power_info *pwr_info; - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - int delta_index; - u8 rate_index; - u8 scan_tbl_index; - const s8 *clip_pwrs; /* array of power levels for each rate */ - u8 gain, dsp_atten; - s8 power; - u8 pwr_index, base_pwr_index, a_band; - u8 i; - int temperature; - - /* save temperature reference, - * so we can determine next time to calibrate */ - temperature = iwl3945_hw_reg_txpower_get_temperature(priv); - priv->last_temperature = temperature; - - iwl3945_hw_reg_init_channel_groups(priv); - - /* initialize Tx power info for each and every channel, 2.4 and 5.x */ - for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; - i++, ch_info++) { - a_band = iwl_legacy_is_channel_a_band(ch_info); - if (!iwl_legacy_is_channel_valid(ch_info)) - continue; - - /* find this channel's channel group (*not* "band") index */ - ch_info->group_index = - iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); - - /* Get this chnlgrp's rate->max/clip-powers table */ - clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; - - /* calculate power index *adjustment* value according to - * diff between current temperature and factory temperature */ - delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, - eeprom->groups[ch_info->group_index]. - temperature); - - IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n", - ch_info->channel, delta_index, temperature + - IWL_TEMP_CONVERT); - - /* set tx power value for all OFDM rates */ - for (rate_index = 0; rate_index < IWL_OFDM_RATES; - rate_index++) { - s32 uninitialized_var(power_idx); - int rc; - - /* use channel group's clip-power table, - * but don't exceed channel's max power */ - s8 pwr = min(ch_info->max_power_avg, - clip_pwrs[rate_index]); - - pwr_info = &ch_info->power_info[rate_index]; - - /* get base (i.e. 
at factory-measured temperature) - * power table index for this rate's power */ - rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr, - ch_info->group_index, - &power_idx); - if (rc) { - IWL_ERR(priv, "Invalid power index\n"); - return rc; - } - pwr_info->base_power_index = (u8) power_idx; - - /* temperature compensate */ - power_idx += delta_index; - - /* stay within range of gain table */ - power_idx = iwl3945_hw_reg_fix_power_index(power_idx); - - /* fill 1 OFDM rate's iwl3945_channel_power_info struct */ - pwr_info->requested_power = pwr; - pwr_info->power_table_index = (u8) power_idx; - pwr_info->tpc.tx_gain = - power_gain_table[a_band][power_idx].tx_gain; - pwr_info->tpc.dsp_atten = - power_gain_table[a_band][power_idx].dsp_atten; - } - - /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/ - pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]; - power = pwr_info->requested_power + - IWL_CCK_FROM_OFDM_POWER_DIFF; - pwr_index = pwr_info->power_table_index + - IWL_CCK_FROM_OFDM_INDEX_DIFF; - base_pwr_index = pwr_info->base_power_index + - IWL_CCK_FROM_OFDM_INDEX_DIFF; - - /* stay within table range */ - pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index); - gain = power_gain_table[a_band][pwr_index].tx_gain; - dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten; - - /* fill each CCK rate's iwl3945_channel_power_info structure - * NOTE: All CCK-rate Txpwrs are the same for a given chnl! - * NOTE: CCK rates start at end of OFDM rates! */ - for (rate_index = 0; - rate_index < IWL_CCK_RATES; rate_index++) { - pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES]; - pwr_info->requested_power = power; - pwr_info->power_table_index = pwr_index; - pwr_info->base_power_index = base_pwr_index; - pwr_info->tpc.tx_gain = gain; - pwr_info->tpc.dsp_atten = dsp_atten; - } - - /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ - for (scan_tbl_index = 0; - scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { - s32 actual_index = (scan_tbl_index == 0) ? - IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; - iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, - actual_index, clip_pwrs, ch_info, a_band); - } - } - - return 0; -} - -int iwl3945_hw_rxq_stop(struct iwl_priv *priv) -{ - int rc; - - iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); - rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, - FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); - if (rc < 0) - IWL_ERR(priv, "Can't stop Rx DMA.\n"); - - return 0; -} - -int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) -{ - int txq_id = txq->q.id; - - struct iwl3945_shared *shared_data = priv->_3945.shared_virt; - - shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); - - iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); - iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); - - iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), - FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | - FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | - FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | - FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | - FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); - - /* fake read to flush all prev. 
writes */ - iwl_read32(priv, FH39_TSSR_CBB_BASE); - - return 0; -} - -/* - * HCMD utils - */ -static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len) -{ - switch (cmd_id) { - case REPLY_RXON: - return sizeof(struct iwl3945_rxon_cmd); - case POWER_TABLE_CMD: - return sizeof(struct iwl3945_powertable_cmd); - default: - return len; - } -} - - -static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, - u8 *data) -{ - struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; - addsta->mode = cmd->mode; - memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); - memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); - addsta->station_flags = cmd->station_flags; - addsta->station_flags_msk = cmd->station_flags_msk; - addsta->tid_disable_tx = cpu_to_le16(0); - addsta->rate_n_flags = cmd->rate_n_flags; - addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; - addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; - addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; - - return (u16)sizeof(struct iwl3945_addsta_cmd); -} - -static int iwl3945_add_bssid_station(struct iwl_priv *priv, - const u8 *addr, u8 *sta_id_r) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - int ret; - u8 sta_id; - unsigned long flags; - - if (sta_id_r) - *sta_id_r = IWL_INVALID_STATION; - - ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM\n", addr); - return ret; - } - - if (sta_id_r) - *sta_id_r = sta_id; - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].used |= IWL_STA_LOCAL; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return 0; -} -static int iwl3945_manage_ibss_station(struct iwl_priv *priv, - struct ieee80211_vif *vif, bool add) -{ - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - int ret; - - if (add) { - ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid, - &vif_priv->ibss_bssid_sta_id); - if (ret) - return ret; - - iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id, - (priv->band == IEEE80211_BAND_5GHZ) ? 
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP); - iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id); - - return 0; - } - - return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, - vif->bss_conf.bssid); -} - -/** - * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table - */ -int iwl3945_init_hw_rate_table(struct iwl_priv *priv) -{ - int rc, i, index, prev_index; - struct iwl3945_rate_scaling_cmd rate_cmd = { - .reserved = {0, 0, 0}, - }; - struct iwl3945_rate_scaling_info *table = rate_cmd.table; - - for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) { - index = iwl3945_rates[i].table_rs_index; - - table[index].rate_n_flags = - iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0); - table[index].try_cnt = priv->retry_rate; - prev_index = iwl3945_get_prev_ieee_rate(i); - table[index].next_rate_index = - iwl3945_rates[prev_index].table_rs_index; - } - - switch (priv->band) { - case IEEE80211_BAND_5GHZ: - IWL_DEBUG_RATE(priv, "Select A mode rate scale\n"); - /* If one of the following CCK rates is used, - * have it fall back to the 6M OFDM rate */ - for (i = IWL_RATE_1M_INDEX_TABLE; - i <= IWL_RATE_11M_INDEX_TABLE; i++) - table[i].next_rate_index = - iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; - - /* Don't fall back to CCK rates */ - table[IWL_RATE_12M_INDEX_TABLE].next_rate_index = - IWL_RATE_9M_INDEX_TABLE; - - /* Don't drop out of OFDM rates */ - table[IWL_RATE_6M_INDEX_TABLE].next_rate_index = - iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; - break; - - case IEEE80211_BAND_2GHZ: - IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n"); - /* If an OFDM rate is used, have it fall back to the - * 1M CCK rates */ - - if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && - iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { - - index = IWL_FIRST_CCK_RATE; - for (i = IWL_RATE_6M_INDEX_TABLE; - i <= IWL_RATE_54M_INDEX_TABLE; i++) - table[i].next_rate_index = - iwl3945_rates[index].table_rs_index; - - index = IWL_RATE_11M_INDEX_TABLE; - /* CCK shouldn't fall back to OFDM... 
*/ - table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE; - } - break; - - default: - WARN_ON(1); - break; - } - - /* Update the rate scaling for control frame Tx */ - rate_cmd.table_id = 0; - rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), - &rate_cmd); - if (rc) - return rc; - - /* Update the rate scaling for data frame Tx */ - rate_cmd.table_id = 1; - return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), - &rate_cmd); -} - -/* Called when initializing driver */ -int iwl3945_hw_set_hw_params(struct iwl_priv *priv) -{ - memset((void *)&priv->hw_params, 0, - sizeof(struct iwl_hw_params)); - - priv->_3945.shared_virt = - dma_alloc_coherent(&priv->pci_dev->dev, - sizeof(struct iwl3945_shared), - &priv->_3945.shared_phys, GFP_KERNEL); - if (!priv->_3945.shared_virt) { - IWL_ERR(priv, "failed to allocate pci memory\n"); - return -ENOMEM; - } - - /* Assign number of Usable TX queues */ - priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - - priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); - priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K); - priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; - priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; - priv->hw_params.max_stations = IWL3945_STATION_COUNT; - priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID; - - priv->sta_key_max_num = STA_KEY_MAX_NUM; - - priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; - priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; - priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS; - - return 0; -} - -unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, - struct iwl3945_frame *frame, u8 rate) -{ - struct iwl3945_tx_beacon_cmd *tx_beacon_cmd; - unsigned int frame_size; - - tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; - memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); - - tx_beacon_cmd->tx.sta_id = - priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; - tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - frame_size = iwl3945_fill_beacon_frame(priv, - tx_beacon_cmd->frame, - sizeof(frame->u) - sizeof(*tx_beacon_cmd)); - - BUG_ON(frame_size > MAX_MPDU_SIZE); - tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); - - tx_beacon_cmd->tx.rate = rate; - tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | - TX_CMD_FLG_TSF_MSK); - - /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/ - tx_beacon_cmd->tx.supp_rates[0] = - (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; - - tx_beacon_cmd->tx.supp_rates[1] = - (IWL_CCK_BASIC_RATES_MASK & 0xF); - - return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size; -} - -void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv) -{ - priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx; - priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; -} - -void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv) -{ - INIT_DELAYED_WORK(&priv->_3945.thermal_periodic, - iwl3945_bg_reg_txpower_periodic); -} - -void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv) -{ - cancel_delayed_work(&priv->_3945.thermal_periodic); -} - -/* check contents of special bootstrap uCode SRAM */ -static int iwl3945_verify_bsm(struct iwl_priv *priv) - { - __le32 *image = priv->ucode_boot.v_addr; - u32 len = priv->ucode_boot.len; - u32 reg; - u32 val; - - IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); - - /* verify BSM SRAM contents */ - val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); - for (reg = BSM_SRAM_LOWER_BOUND; - reg < 
BSM_SRAM_LOWER_BOUND + len; - reg += sizeof(u32), image++) { - val = iwl_legacy_read_prph(priv, reg); - if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "BSM uCode verification failed at " - "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", - BSM_SRAM_LOWER_BOUND, - reg - BSM_SRAM_LOWER_BOUND, len, - val, le32_to_cpu(*image)); - return -EIO; - } - } - - IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); - - return 0; -} - - -/****************************************************************************** - * - * EEPROM related functions - * - ******************************************************************************/ - -/* - * Clear the OWNER_MSK, to establish driver (instead of uCode running on - * embedded controller) as EEPROM reader; each read is a series of pulses - * to/from the EEPROM chip, not a single event, so even reads could conflict - * if they weren't arbitrated by some ownership mechanism. Here, the driver - * simply claims ownership, which should be safe when this function is called - * (i.e. before loading uCode!). - */ -static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) -{ - _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); - return 0; -} - - -static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv) -{ - return; -} - - /** - * iwl3945_load_bsm - Load bootstrap instructions - * - * BSM operation: - * - * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program - * in special SRAM that does not power down during RFKILL. When powering back - * up after power-saving sleeps (or during initial uCode load), the BSM loads - * the bootstrap program into the on-board processor, and starts it. - * - * The bootstrap program loads (via DMA) instructions and data for a new - * program from host DRAM locations indicated by the host driver in the - * BSM_DRAM_* registers. Once the new program is loaded, it starts - * automatically. - * - * When initializing the NIC, the host driver points the BSM to the - * "initialize" uCode image. This uCode sets up some internal data, then - * notifies host via "initialize alive" that it is complete. - * - * The host then replaces the BSM_DRAM_* pointer values to point to the - * normal runtime uCode instructions and a backup uCode data cache buffer - * (filled initially with starting data values for the on-board processor), - * then triggers the "initialize" uCode to load and launch the runtime uCode, - * which begins normal operation. - * - * When doing a power-save shutdown, runtime uCode saves data SRAM into - * the backup data cache in DRAM before SRAM is powered down. - * - * When powering back up, the BSM loads the bootstrap program. This reloads - * the runtime uCode instructions and the backup data cache into SRAM, - * and re-launches the runtime uCode from where it left off. - */ -static int iwl3945_load_bsm(struct iwl_priv *priv) -{ - __le32 *image = priv->ucode_boot.v_addr; - u32 len = priv->ucode_boot.len; - dma_addr_t pinst; - dma_addr_t pdata; - u32 inst_len; - u32 data_len; - int rc; - int i; - u32 done; - u32 reg_offset; - - IWL_DEBUG_INFO(priv, "Begin load bsm\n"); - - /* make sure bootstrap program is no larger than BSM's SRAM size */ - if (len > IWL39_MAX_BSM_SIZE) - return -EINVAL; - - /* Tell bootstrap uCode where to find the "Initialize" uCode - * in host DRAM ... host DRAM physical address bits 31:0 for 3945. 
- * NOTE: iwl3945_initialize_alive_start() will replace these values, - * after the "initialize" uCode has run, to point to - * runtime/protocol instructions and backup data cache. */ - pinst = priv->ucode_init.p_addr; - pdata = priv->ucode_init_data.p_addr; - inst_len = priv->ucode_init.len; - data_len = priv->ucode_init_data.len; - - iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); - - /* Fill BSM memory with bootstrap instructions */ - for (reg_offset = BSM_SRAM_LOWER_BOUND; - reg_offset < BSM_SRAM_LOWER_BOUND + len; - reg_offset += sizeof(u32), image++) - _iwl_legacy_write_prph(priv, reg_offset, - le32_to_cpu(*image)); - - rc = iwl3945_verify_bsm(priv); - if (rc) - return rc; - - /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ - iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); - iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, - IWL39_RTC_INST_LOWER_BOUND); - iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); - - /* Load bootstrap code into instruction SRAM now, - * to prepare to load "initialize" uCode */ - iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, - BSM_WR_CTRL_REG_BIT_START); - - /* Wait for load of bootstrap uCode to finish */ - for (i = 0; i < 100; i++) { - done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); - if (!(done & BSM_WR_CTRL_REG_BIT_START)) - break; - udelay(10); - } - if (i < 100) - IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); - else { - IWL_ERR(priv, "BSM write did not complete!\n"); - return -EIO; - } - - /* Enable future boot loads whenever power management unit triggers it - * (e.g. 
when powering back up after power-save shutdown) */ - iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, - BSM_WR_CTRL_REG_BIT_START_EN); - - return 0; -} - -static struct iwl_hcmd_ops iwl3945_hcmd = { - .rxon_assoc = iwl3945_send_rxon_assoc, - .commit_rxon = iwl3945_commit_rxon, -}; - -static struct iwl_lib_ops iwl3945_lib = { - .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl3945_hw_txq_free_tfd, - .txq_init = iwl3945_hw_tx_queue_init, - .load_ucode = iwl3945_load_bsm, - .dump_nic_error_log = iwl3945_dump_nic_error_log, - .apm_ops = { - .init = iwl3945_apm_init, - .config = iwl3945_nic_config, - }, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REGULATORY_BAND_1_CHANNELS, - EEPROM_REGULATORY_BAND_2_CHANNELS, - EEPROM_REGULATORY_BAND_3_CHANNELS, - EEPROM_REGULATORY_BAND_4_CHANNELS, - EEPROM_REGULATORY_BAND_5_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - EEPROM_REGULATORY_BAND_NO_HT40, - }, - .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, - .release_semaphore = iwl3945_eeprom_release_semaphore, - }, - .send_tx_power = iwl3945_send_tx_power, - .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, - - .debugfs_ops = { - .rx_stats_read = iwl3945_ucode_rx_stats_read, - .tx_stats_read = iwl3945_ucode_tx_stats_read, - .general_stats_read = iwl3945_ucode_general_stats_read, - }, -}; - -static const struct iwl_legacy_ops iwl3945_legacy_ops = { - .post_associate = iwl3945_post_associate, - .config_ap = iwl3945_config_ap, - .manage_ibss_station = iwl3945_manage_ibss_station, -}; - -static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { - .get_hcmd_size = iwl3945_get_hcmd_size, - .build_addsta_hcmd = iwl3945_build_addsta_hcmd, - .request_scan = iwl3945_request_scan, - .post_scan = iwl3945_post_scan, -}; - -static const struct iwl_ops iwl3945_ops = { - .lib = &iwl3945_lib, - .hcmd = &iwl3945_hcmd, - .utils = &iwl3945_hcmd_utils, - .led = &iwl3945_led_ops, - .legacy = &iwl3945_legacy_ops, - .ieee80211_ops = &iwl3945_hw_ops, -}; - -static struct iwl_base_params iwl3945_base_params = { - .eeprom_size = IWL3945_EEPROM_IMG_SIZE, - .num_of_queues = IWL39_NUM_QUEUES, - .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, - .set_l0s = false, - .use_bsm = true, - .led_compensation = 64, - .wd_timeout = IWL_DEF_WD_TIMEOUT, -}; - -static struct iwl_cfg iwl3945_bg_cfg = { - .name = "3945BG", - .fw_name_pre = IWL3945_FW_PRE, - .ucode_api_max = IWL3945_UCODE_API_MAX, - .ucode_api_min = IWL3945_UCODE_API_MIN, - .sku = IWL_SKU_G, - .eeprom_ver = EEPROM_3945_EEPROM_VERSION, - .ops = &iwl3945_ops, - .mod_params = &iwl3945_mod_params, - .base_params = &iwl3945_base_params, - .led_mode = IWL_LED_BLINK, -}; - -static struct iwl_cfg iwl3945_abg_cfg = { - .name = "3945ABG", - .fw_name_pre = IWL3945_FW_PRE, - .ucode_api_max = IWL3945_UCODE_API_MAX, - .ucode_api_min = IWL3945_UCODE_API_MIN, - .sku = IWL_SKU_A|IWL_SKU_G, - .eeprom_ver = EEPROM_3945_EEPROM_VERSION, - .ops = &iwl3945_ops, - .mod_params = &iwl3945_mod_params, - .base_params = &iwl3945_base_params, - .led_mode = IWL_LED_BLINK, -}; - -DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { - {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, - {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, - {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, - {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)}, - {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)}, - {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)}, - {0} -}; - -MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids); diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h 
b/drivers/net/wireless/iwlegacy/iwl-3945.h deleted file mode 100644 index b118b59b71d..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-3945.h +++ /dev/null @@ -1,308 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -/* - * Please use this file (iwl-3945.h) for driver implementation definitions. - * Please use iwl-3945-commands.h for uCode API definitions. - * Please use iwl-3945-hw.h for hardware-related definitions. - */ - -#ifndef __iwl_3945_h__ -#define __iwl_3945_h__ - -#include <linux/pci.h> /* for struct pci_device_id */ -#include <linux/kernel.h> -#include <net/ieee80211_radiotap.h> - -/* Hardware specific file defines the PCI IDs table for that hardware module */ -extern const struct pci_device_id iwl3945_hw_card_ids[]; - -#include "iwl-csr.h" -#include "iwl-prph.h" -#include "iwl-fh.h" -#include "iwl-3945-hw.h" -#include "iwl-debug.h" -#include "iwl-power.h" -#include "iwl-dev.h" -#include "iwl-led.h" - -/* Highest firmware API version supported */ -#define IWL3945_UCODE_API_MAX 2 - -/* Lowest firmware API version supported */ -#define IWL3945_UCODE_API_MIN 1 - -#define IWL3945_FW_PRE "iwlwifi-3945-" -#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode" -#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api) - -/* Default noise level to report when noise measurement is not available. - * This may be because we're: - * 1) Not associated (4965, no beacon statistics being sent to driver) - * 2) Scanning (noise measurement does not apply to associated channel) - * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) - * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. - * Also, -127 works better than 0 when averaging frames with/without - * noise info (e.g. averaging might be done in app); measured dBm values are - * always negative ... using a negative value as the default keeps all - * averages within an s8's (used in some apps) range of negative values. 
*/ -#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) - -/* Module parameters accessible from iwl-*.c */ -extern struct iwl_mod_params iwl3945_mod_params; - -struct iwl3945_rate_scale_data { - u64 data; - s32 success_counter; - s32 success_ratio; - s32 counter; - s32 average_tpt; - unsigned long stamp; -}; - -struct iwl3945_rs_sta { - spinlock_t lock; - struct iwl_priv *priv; - s32 *expected_tpt; - unsigned long last_partial_flush; - unsigned long last_flush; - u32 flush_time; - u32 last_tx_packets; - u32 tx_packets; - u8 tgg; - u8 flush_pending; - u8 start_rate; - struct timer_list rate_scale_flush; - struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945]; -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_stats_table_file; -#endif - - /* used to be in sta_info */ - int last_txrate_idx; -}; - - -/* - * The common struct MUST be first because it is shared between - * 3945 and 4965! - */ -struct iwl3945_sta_priv { - struct iwl_station_priv_common common; - struct iwl3945_rs_sta rs_sta; -}; - -enum iwl3945_antenna { - IWL_ANTENNA_DIVERSITY, - IWL_ANTENNA_MAIN, - IWL_ANTENNA_AUX -}; - -/* - * RTS threshold here is total size [2347] minus 4 FCS bytes - * Per spec: - * a value of 0 means RTS on all data/management packets - * a value > max MSDU size means no RTS - * else RTS for data/management frames where MPDU is larger - * than RTS value. - */ -#define DEFAULT_RTS_THRESHOLD 2347U -#define MIN_RTS_THRESHOLD 0U -#define MAX_RTS_THRESHOLD 2347U -#define MAX_MSDU_SIZE 2304U -#define MAX_MPDU_SIZE 2346U -#define DEFAULT_BEACON_INTERVAL 100U -#define DEFAULT_SHORT_RETRY_LIMIT 7U -#define DEFAULT_LONG_RETRY_LIMIT 4U - -#define IWL_TX_FIFO_AC0 0 -#define IWL_TX_FIFO_AC1 1 -#define IWL_TX_FIFO_AC2 2 -#define IWL_TX_FIFO_AC3 3 -#define IWL_TX_FIFO_HCCA_1 5 -#define IWL_TX_FIFO_HCCA_2 6 -#define IWL_TX_FIFO_NONE 7 - -#define IEEE80211_DATA_LEN 2304 -#define IEEE80211_4ADDR_LEN 30 -#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -struct iwl3945_frame { - union { - struct ieee80211_hdr frame; - struct iwl3945_tx_beacon_cmd beacon; - u8 raw[IEEE80211_FRAME_LEN]; - u8 cmd[360]; - } u; - struct list_head list; -}; - -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) - -#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 -#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 -#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 - -#define IWL_SUPPORTED_RATES_IE_LEN 8 - -#define SCAN_INTERVAL 100 - -#define MAX_TID_COUNT 9 - -#define IWL_INVALID_RATE 0xFF -#define IWL_INVALID_VALUE -1 - -#define STA_PS_STATUS_WAKE 0 -#define STA_PS_STATUS_SLEEP 1 - -struct iwl3945_ibss_seq { - u8 mac[ETH_ALEN]; - u16 seq_num; - u16 frag_num; - unsigned long packet_time; - struct list_head list; -}; - -#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\ - x->u.rx_frame.stats.payload + \ - x->u.rx_frame.stats.phy_count)) -#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\ - IWL_RX_HDR(x)->payload + \ - le16_to_cpu(IWL_RX_HDR(x)->len))) -#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) -#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) - - -/****************************************************************************** - * - * Functions implemented in iwl3945-base.c which are forward declared here - * for use by iwl-*.c - * - *****************************************************************************/ -extern int iwl3945_calc_db_from_ratio(int sig_ratio); -extern void 
iwl3945_rx_replenish(void *data); -extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, int left); -extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, - char **buf, bool display); -extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); - -/****************************************************************************** - * - * Functions implemented in iwl-[34]*.c which are forward declared here - * for use by iwl3945-base.c - * - * NOTE: The implementation of these functions are hardware specific - * which is why they are in the hardware specific files (vs. iwl-base.c) - * - * Naming convention -- - * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_) - * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) - * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) - * iwl3945_bg_ <-- Called from work queue context - * iwl3945_mac_ <-- mac80211 callback - * - ****************************************************************************/ -extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv); -extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv); -extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv); -extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv); -extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv); -extern int iwl3945_hw_nic_init(struct iwl_priv *priv); -extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv); -extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv); -extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv); -extern int iwl3945_hw_nic_reset(struct iwl_priv *priv); -extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, - u8 reset, u8 pad); -extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq); -extern int iwl3945_hw_get_temperature(struct iwl_priv *priv); -extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, - struct iwl_tx_queue *txq); -extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, - struct iwl3945_frame *frame, u8 rate); -void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, - int sta_id, int tx_id); -extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv); -extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); -extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl3945_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -extern void iwl3945_disable_events(struct iwl_priv *priv); -extern int iwl4965_get_temperature(const struct iwl_priv *priv); -extern void iwl3945_post_associate(struct iwl_priv *priv); -extern void iwl3945_config_ap(struct iwl_priv *priv); - -extern int iwl3945_commit_rxon(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); - -/** - * iwl3945_hw_find_station - Find station id for a given BSSID - * @bssid: MAC address of station ID to find - * - * NOTE: This should not be hardware specific but the code has - * not yet been merged into a single common layer for managing the - * station tables. 
- */ -extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); - -extern struct ieee80211_ops iwl3945_hw_ops; - -/* - * Forward declare iwl-3945.c functions for iwl3945-base.c - */ -extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); -extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); -extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); -extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); - -extern const struct iwl_channel_info *iwl3945_get_channel_info( - const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); - -extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); - -/* scanning */ -int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); -void iwl3945_post_scan(struct iwl_priv *priv); - -/* rates */ -extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945]; - -/* Requires full declaration of iwl_priv before including */ -#include "iwl-io.h" - -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c deleted file mode 100644 index 1c93665766e..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c +++ /dev/null @@ -1,774 +0,0 @@ -/****************************************************************************** -* -* GPL LICENSE SUMMARY -* -* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. -* -* This program is free software; you can redistribute it and/or modify -* it under the terms of version 2 of the GNU General Public License as -* published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, but -* WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* General Public License for more details. -* -* You should have received a copy of the GNU General Public License -* along with this program; if not, write to the Free Software -* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, -* USA -* -* The full GNU General Public License is included in this distribution -* in the file called LICENSE.GPL. -* -* Contact Information: -* Intel Linux Wireless <ilw@linux.intel.com> -* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -*****************************************************************************/ -#include "iwl-4965.h" -#include "iwl-4965-debugfs.h" - -static const char *fmt_value = " %-30s %10u\n"; -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; -static const char *fmt_header = - "%-32s current cumulative delta max\n"; - -static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) -{ - int p = 0; - u32 flag; - - flag = le32_to_cpu(priv->_4965.statistics.flag); - - p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); - if (flag & UCODE_STATISTICS_CLEAR_MSK) - p += scnprintf(buf + p, bufsz - p, - "\tStatistics have been cleared\n"); - p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", - (flag & UCODE_STATISTICS_FREQUENCY_MSK) - ? "2.4 GHz" : "5.2 GHz"); - p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", - (flag & UCODE_STATISTICS_NARROW_BAND_MSK) - ? 
"enabled" : "disabled"); - - return p; -} - -ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct statistics_rx_phy) * 40 + - sizeof(struct statistics_rx_non_phy) * 40 + - sizeof(struct statistics_rx_ht_phy) * 40 + 400; - ssize_t ret; - struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; - struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; - struct statistics_rx_non_phy *general, *accum_general; - struct statistics_rx_non_phy *delta_general, *max_general; - struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - ofdm = &priv->_4965.statistics.rx.ofdm; - cck = &priv->_4965.statistics.rx.cck; - general = &priv->_4965.statistics.rx.general; - ht = &priv->_4965.statistics.rx.ofdm_ht; - accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm; - accum_cck = &priv->_4965.accum_statistics.rx.cck; - accum_general = &priv->_4965.accum_statistics.rx.general; - accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht; - delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm; - delta_cck = &priv->_4965.delta_statistics.rx.cck; - delta_general = &priv->_4965.delta_statistics.rx.general; - delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht; - max_ofdm = &priv->_4965.max_delta.rx.ofdm; - max_cck = &priv->_4965.max_delta.rx.cck; - max_general = &priv->_4965.max_delta.rx.general; - max_ht = &priv->_4965.max_delta.rx.ofdm_ht; - - pos += iwl4965_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - OFDM:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ina_cnt:", - le32_to_cpu(ofdm->ina_cnt), - accum_ofdm->ina_cnt, - delta_ofdm->ina_cnt, max_ofdm->ina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_cnt:", - le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, - delta_ofdm->fina_cnt, max_ofdm->fina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "plcp_err:", - le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, - delta_ofdm->plcp_err, max_ofdm->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_err:", - le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, - delta_ofdm->crc32_err, max_ofdm->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "overrun_err:", - le32_to_cpu(ofdm->overrun_err), - accum_ofdm->overrun_err, delta_ofdm->overrun_err, - max_ofdm->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "early_overrun_err:", - le32_to_cpu(ofdm->early_overrun_err), - accum_ofdm->early_overrun_err, - delta_ofdm->early_overrun_err, - max_ofdm->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_good:", - le32_to_cpu(ofdm->crc32_good), - accum_ofdm->crc32_good, delta_ofdm->crc32_good, - max_ofdm->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "false_alarm_cnt:", - le32_to_cpu(ofdm->false_alarm_cnt), - accum_ofdm->false_alarm_cnt, - delta_ofdm->false_alarm_cnt, - max_ofdm->false_alarm_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_sync_err_cnt:", - 
le32_to_cpu(ofdm->fina_sync_err_cnt), - accum_ofdm->fina_sync_err_cnt, - delta_ofdm->fina_sync_err_cnt, - max_ofdm->fina_sync_err_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sfd_timeout:", - le32_to_cpu(ofdm->sfd_timeout), - accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, - max_ofdm->sfd_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_timeout:", - le32_to_cpu(ofdm->fina_timeout), - accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, - max_ofdm->fina_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "unresponded_rts:", - le32_to_cpu(ofdm->unresponded_rts), - accum_ofdm->unresponded_rts, - delta_ofdm->unresponded_rts, - max_ofdm->unresponded_rts); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rxe_frame_lmt_ovrun:", - le32_to_cpu(ofdm->rxe_frame_limit_overrun), - accum_ofdm->rxe_frame_limit_overrun, - delta_ofdm->rxe_frame_limit_overrun, - max_ofdm->rxe_frame_limit_overrun); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ack_cnt:", - le32_to_cpu(ofdm->sent_ack_cnt), - accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, - max_ofdm->sent_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_cts_cnt:", - le32_to_cpu(ofdm->sent_cts_cnt), - accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, - max_ofdm->sent_cts_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ba_rsp_cnt:", - le32_to_cpu(ofdm->sent_ba_rsp_cnt), - accum_ofdm->sent_ba_rsp_cnt, - delta_ofdm->sent_ba_rsp_cnt, - max_ofdm->sent_ba_rsp_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dsp_self_kill:", - le32_to_cpu(ofdm->dsp_self_kill), - accum_ofdm->dsp_self_kill, - delta_ofdm->dsp_self_kill, - max_ofdm->dsp_self_kill); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "mh_format_err:", - le32_to_cpu(ofdm->mh_format_err), - accum_ofdm->mh_format_err, - delta_ofdm->mh_format_err, - max_ofdm->mh_format_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "re_acq_main_rssi_sum:", - le32_to_cpu(ofdm->re_acq_main_rssi_sum), - accum_ofdm->re_acq_main_rssi_sum, - delta_ofdm->re_acq_main_rssi_sum, - max_ofdm->re_acq_main_rssi_sum); - - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - CCK:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ina_cnt:", - le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, - delta_cck->ina_cnt, max_cck->ina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_cnt:", - le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, - delta_cck->fina_cnt, max_cck->fina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "plcp_err:", - le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, - delta_cck->plcp_err, max_cck->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_err:", - le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, - delta_cck->crc32_err, max_cck->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "overrun_err:", - le32_to_cpu(cck->overrun_err), - accum_cck->overrun_err, delta_cck->overrun_err, - max_cck->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "early_overrun_err:", - le32_to_cpu(cck->early_overrun_err), - accum_cck->early_overrun_err, - delta_cck->early_overrun_err, - max_cck->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_good:", - le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, - delta_cck->crc32_good, max_cck->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "false_alarm_cnt:", - 
le32_to_cpu(cck->false_alarm_cnt), - accum_cck->false_alarm_cnt, - delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_sync_err_cnt:", - le32_to_cpu(cck->fina_sync_err_cnt), - accum_cck->fina_sync_err_cnt, - delta_cck->fina_sync_err_cnt, - max_cck->fina_sync_err_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sfd_timeout:", - le32_to_cpu(cck->sfd_timeout), - accum_cck->sfd_timeout, delta_cck->sfd_timeout, - max_cck->sfd_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_timeout:", - le32_to_cpu(cck->fina_timeout), - accum_cck->fina_timeout, delta_cck->fina_timeout, - max_cck->fina_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "unresponded_rts:", - le32_to_cpu(cck->unresponded_rts), - accum_cck->unresponded_rts, delta_cck->unresponded_rts, - max_cck->unresponded_rts); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rxe_frame_lmt_ovrun:", - le32_to_cpu(cck->rxe_frame_limit_overrun), - accum_cck->rxe_frame_limit_overrun, - delta_cck->rxe_frame_limit_overrun, - max_cck->rxe_frame_limit_overrun); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ack_cnt:", - le32_to_cpu(cck->sent_ack_cnt), - accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, - max_cck->sent_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_cts_cnt:", - le32_to_cpu(cck->sent_cts_cnt), - accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, - max_cck->sent_cts_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ba_rsp_cnt:", - le32_to_cpu(cck->sent_ba_rsp_cnt), - accum_cck->sent_ba_rsp_cnt, - delta_cck->sent_ba_rsp_cnt, - max_cck->sent_ba_rsp_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dsp_self_kill:", - le32_to_cpu(cck->dsp_self_kill), - accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, - max_cck->dsp_self_kill); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "mh_format_err:", - le32_to_cpu(cck->mh_format_err), - accum_cck->mh_format_err, delta_cck->mh_format_err, - max_cck->mh_format_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "re_acq_main_rssi_sum:", - le32_to_cpu(cck->re_acq_main_rssi_sum), - accum_cck->re_acq_main_rssi_sum, - delta_cck->re_acq_main_rssi_sum, - max_cck->re_acq_main_rssi_sum); - - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - GENERAL:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bogus_cts:", - le32_to_cpu(general->bogus_cts), - accum_general->bogus_cts, delta_general->bogus_cts, - max_general->bogus_cts); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bogus_ack:", - le32_to_cpu(general->bogus_ack), - accum_general->bogus_ack, delta_general->bogus_ack, - max_general->bogus_ack); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "non_bssid_frames:", - le32_to_cpu(general->non_bssid_frames), - accum_general->non_bssid_frames, - delta_general->non_bssid_frames, - max_general->non_bssid_frames); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "filtered_frames:", - le32_to_cpu(general->filtered_frames), - accum_general->filtered_frames, - delta_general->filtered_frames, - max_general->filtered_frames); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "non_channel_beacons:", - le32_to_cpu(general->non_channel_beacons), - accum_general->non_channel_beacons, - delta_general->non_channel_beacons, - max_general->non_channel_beacons); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "channel_beacons:", - 
le32_to_cpu(general->channel_beacons), - accum_general->channel_beacons, - delta_general->channel_beacons, - max_general->channel_beacons); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "num_missed_bcon:", - le32_to_cpu(general->num_missed_bcon), - accum_general->num_missed_bcon, - delta_general->num_missed_bcon, - max_general->num_missed_bcon); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "adc_rx_saturation_time:", - le32_to_cpu(general->adc_rx_saturation_time), - accum_general->adc_rx_saturation_time, - delta_general->adc_rx_saturation_time, - max_general->adc_rx_saturation_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ina_detect_search_tm:", - le32_to_cpu(general->ina_detection_search_time), - accum_general->ina_detection_search_time, - delta_general->ina_detection_search_time, - max_general->ina_detection_search_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_silence_rssi_a:", - le32_to_cpu(general->beacon_silence_rssi_a), - accum_general->beacon_silence_rssi_a, - delta_general->beacon_silence_rssi_a, - max_general->beacon_silence_rssi_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_silence_rssi_b:", - le32_to_cpu(general->beacon_silence_rssi_b), - accum_general->beacon_silence_rssi_b, - delta_general->beacon_silence_rssi_b, - max_general->beacon_silence_rssi_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_silence_rssi_c:", - le32_to_cpu(general->beacon_silence_rssi_c), - accum_general->beacon_silence_rssi_c, - delta_general->beacon_silence_rssi_c, - max_general->beacon_silence_rssi_c); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "interference_data_flag:", - le32_to_cpu(general->interference_data_flag), - accum_general->interference_data_flag, - delta_general->interference_data_flag, - max_general->interference_data_flag); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "channel_load:", - le32_to_cpu(general->channel_load), - accum_general->channel_load, - delta_general->channel_load, - max_general->channel_load); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dsp_false_alarms:", - le32_to_cpu(general->dsp_false_alarms), - accum_general->dsp_false_alarms, - delta_general->dsp_false_alarms, - max_general->dsp_false_alarms); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_rssi_a:", - le32_to_cpu(general->beacon_rssi_a), - accum_general->beacon_rssi_a, - delta_general->beacon_rssi_a, - max_general->beacon_rssi_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_rssi_b:", - le32_to_cpu(general->beacon_rssi_b), - accum_general->beacon_rssi_b, - delta_general->beacon_rssi_b, - max_general->beacon_rssi_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_rssi_c:", - le32_to_cpu(general->beacon_rssi_c), - accum_general->beacon_rssi_c, - delta_general->beacon_rssi_c, - max_general->beacon_rssi_c); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_energy_a:", - le32_to_cpu(general->beacon_energy_a), - accum_general->beacon_energy_a, - delta_general->beacon_energy_a, - max_general->beacon_energy_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_energy_b:", - le32_to_cpu(general->beacon_energy_b), - accum_general->beacon_energy_b, - delta_general->beacon_energy_b, - max_general->beacon_energy_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_energy_c:", - le32_to_cpu(general->beacon_energy_c), - accum_general->beacon_energy_c, - delta_general->beacon_energy_c, - 
max_general->beacon_energy_c); - - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - OFDM_HT:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "plcp_err:", - le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, - delta_ht->plcp_err, max_ht->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "overrun_err:", - le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, - delta_ht->overrun_err, max_ht->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "early_overrun_err:", - le32_to_cpu(ht->early_overrun_err), - accum_ht->early_overrun_err, - delta_ht->early_overrun_err, - max_ht->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_good:", - le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, - delta_ht->crc32_good, max_ht->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_err:", - le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, - delta_ht->crc32_err, max_ht->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "mh_format_err:", - le32_to_cpu(ht->mh_format_err), - accum_ht->mh_format_err, - delta_ht->mh_format_err, max_ht->mh_format_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg_crc32_good:", - le32_to_cpu(ht->agg_crc32_good), - accum_ht->agg_crc32_good, - delta_ht->agg_crc32_good, max_ht->agg_crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg_mpdu_cnt:", - le32_to_cpu(ht->agg_mpdu_cnt), - accum_ht->agg_mpdu_cnt, - delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg_cnt:", - le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, - delta_ht->agg_cnt, max_ht->agg_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "unsupport_mcs:", - le32_to_cpu(ht->unsupport_mcs), - accum_ht->unsupport_mcs, - delta_ht->unsupport_mcs, max_ht->unsupport_mcs); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t iwl4965_ucode_tx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct statistics_tx) * 48) + 250; - ssize_t ret; - struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - tx = &priv->_4965.statistics.tx; - accum_tx = &priv->_4965.accum_statistics.tx; - delta_tx = &priv->_4965.delta_statistics.tx; - max_tx = &priv->_4965.max_delta.tx; - - pos += iwl4965_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Tx:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "preamble:", - le32_to_cpu(tx->preamble_cnt), - accum_tx->preamble_cnt, - delta_tx->preamble_cnt, max_tx->preamble_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rx_detected_cnt:", - le32_to_cpu(tx->rx_detected_cnt), - accum_tx->rx_detected_cnt, - delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bt_prio_defer_cnt:", - le32_to_cpu(tx->bt_prio_defer_cnt), - accum_tx->bt_prio_defer_cnt, - delta_tx->bt_prio_defer_cnt, - max_tx->bt_prio_defer_cnt); - pos += scnprintf(buf + pos, 
bufsz - pos, - fmt_table, "bt_prio_kill_cnt:", - le32_to_cpu(tx->bt_prio_kill_cnt), - accum_tx->bt_prio_kill_cnt, - delta_tx->bt_prio_kill_cnt, - max_tx->bt_prio_kill_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "few_bytes_cnt:", - le32_to_cpu(tx->few_bytes_cnt), - accum_tx->few_bytes_cnt, - delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "cts_timeout:", - le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, - delta_tx->cts_timeout, max_tx->cts_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ack_timeout:", - le32_to_cpu(tx->ack_timeout), - accum_tx->ack_timeout, - delta_tx->ack_timeout, max_tx->ack_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "expected_ack_cnt:", - le32_to_cpu(tx->expected_ack_cnt), - accum_tx->expected_ack_cnt, - delta_tx->expected_ack_cnt, - max_tx->expected_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "actual_ack_cnt:", - le32_to_cpu(tx->actual_ack_cnt), - accum_tx->actual_ack_cnt, - delta_tx->actual_ack_cnt, - max_tx->actual_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dump_msdu_cnt:", - le32_to_cpu(tx->dump_msdu_cnt), - accum_tx->dump_msdu_cnt, - delta_tx->dump_msdu_cnt, - max_tx->dump_msdu_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "abort_nxt_frame_mismatch:", - le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), - accum_tx->burst_abort_next_frame_mismatch_cnt, - delta_tx->burst_abort_next_frame_mismatch_cnt, - max_tx->burst_abort_next_frame_mismatch_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "abort_missing_nxt_frame:", - le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), - accum_tx->burst_abort_missing_next_frame_cnt, - delta_tx->burst_abort_missing_next_frame_cnt, - max_tx->burst_abort_missing_next_frame_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "cts_timeout_collision:", - le32_to_cpu(tx->cts_timeout_collision), - accum_tx->cts_timeout_collision, - delta_tx->cts_timeout_collision, - max_tx->cts_timeout_collision); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ack_ba_timeout_collision:", - le32_to_cpu(tx->ack_or_ba_timeout_collision), - accum_tx->ack_or_ba_timeout_collision, - delta_tx->ack_or_ba_timeout_collision, - max_tx->ack_or_ba_timeout_collision); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg ba_timeout:", - le32_to_cpu(tx->agg.ba_timeout), - accum_tx->agg.ba_timeout, - delta_tx->agg.ba_timeout, - max_tx->agg.ba_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg ba_resched_frames:", - le32_to_cpu(tx->agg.ba_reschedule_frames), - accum_tx->agg.ba_reschedule_frames, - delta_tx->agg.ba_reschedule_frames, - max_tx->agg.ba_reschedule_frames); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_agg_frame:", - le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), - accum_tx->agg.scd_query_agg_frame_cnt, - delta_tx->agg.scd_query_agg_frame_cnt, - max_tx->agg.scd_query_agg_frame_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_no_agg:", - le32_to_cpu(tx->agg.scd_query_no_agg), - accum_tx->agg.scd_query_no_agg, - delta_tx->agg.scd_query_no_agg, - max_tx->agg.scd_query_no_agg); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_agg:", - le32_to_cpu(tx->agg.scd_query_agg), - accum_tx->agg.scd_query_agg, - delta_tx->agg.scd_query_agg, - max_tx->agg.scd_query_agg); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_mismatch:", - 
le32_to_cpu(tx->agg.scd_query_mismatch), - accum_tx->agg.scd_query_mismatch, - delta_tx->agg.scd_query_mismatch, - max_tx->agg.scd_query_mismatch); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg frame_not_ready:", - le32_to_cpu(tx->agg.frame_not_ready), - accum_tx->agg.frame_not_ready, - delta_tx->agg.frame_not_ready, - max_tx->agg.frame_not_ready); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg underrun:", - le32_to_cpu(tx->agg.underrun), - accum_tx->agg.underrun, - delta_tx->agg.underrun, max_tx->agg.underrun); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg bt_prio_kill:", - le32_to_cpu(tx->agg.bt_prio_kill), - accum_tx->agg.bt_prio_kill, - delta_tx->agg.bt_prio_kill, - max_tx->agg.bt_prio_kill); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg rx_ba_rsp_cnt:", - le32_to_cpu(tx->agg.rx_ba_rsp_cnt), - accum_tx->agg.rx_ba_rsp_cnt, - delta_tx->agg.rx_ba_rsp_cnt, - max_tx->agg.rx_ba_rsp_cnt); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t -iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct statistics_general) * 10 + 300; - ssize_t ret; - struct statistics_general_common *general, *accum_general; - struct statistics_general_common *delta_general, *max_general; - struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; - struct statistics_div *div, *accum_div, *delta_div, *max_div; - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - general = &priv->_4965.statistics.general.common; - dbg = &priv->_4965.statistics.general.common.dbg; - div = &priv->_4965.statistics.general.common.div; - accum_general = &priv->_4965.accum_statistics.general.common; - accum_dbg = &priv->_4965.accum_statistics.general.common.dbg; - accum_div = &priv->_4965.accum_statistics.general.common.div; - delta_general = &priv->_4965.delta_statistics.general.common; - max_general = &priv->_4965.max_delta.general.common; - delta_dbg = &priv->_4965.delta_statistics.general.common.dbg; - max_dbg = &priv->_4965.max_delta.general.common.dbg; - delta_div = &priv->_4965.delta_statistics.general.common.div; - max_div = &priv->_4965.max_delta.general.common.div; - - pos += iwl4965_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_General:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_value, "temperature:", - le32_to_cpu(general->temperature)); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_value, "ttl_timestamp:", - le32_to_cpu(general->ttl_timestamp)); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "burst_check:", - le32_to_cpu(dbg->burst_check), - accum_dbg->burst_check, - delta_dbg->burst_check, max_dbg->burst_check); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "burst_count:", - le32_to_cpu(dbg->burst_count), - accum_dbg->burst_count, - delta_dbg->burst_count, max_dbg->burst_count); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "wait_for_silence_timeout_count:", - le32_to_cpu(dbg->wait_for_silence_timeout_cnt), - accum_dbg->wait_for_silence_timeout_cnt, - 
delta_dbg->wait_for_silence_timeout_cnt, - max_dbg->wait_for_silence_timeout_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sleep_time:", - le32_to_cpu(general->sleep_time), - accum_general->sleep_time, - delta_general->sleep_time, max_general->sleep_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "slots_out:", - le32_to_cpu(general->slots_out), - accum_general->slots_out, - delta_general->slots_out, max_general->slots_out); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "slots_idle:", - le32_to_cpu(general->slots_idle), - accum_general->slots_idle, - delta_general->slots_idle, max_general->slots_idle); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "tx_on_a:", - le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, - delta_div->tx_on_a, max_div->tx_on_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "tx_on_b:", - le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, - delta_div->tx_on_b, max_div->tx_on_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "exec_time:", - le32_to_cpu(div->exec_time), accum_div->exec_time, - delta_div->exec_time, max_div->exec_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "probe_time:", - le32_to_cpu(div->probe_time), accum_div->probe_time, - delta_div->probe_time, max_div->probe_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rx_enable_counter:", - le32_to_cpu(general->rx_enable_counter), - accum_general->rx_enable_counter, - delta_general->rx_enable_counter, - max_general->rx_enable_counter); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "num_of_sos_states:", - le32_to_cpu(general->num_of_sos_states), - accum_general->num_of_sos_states, - delta_general->num_of_sos_states, - max_general->num_of_sos_states); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h deleted file mode 100644 index 6c8e35361a9..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h +++ /dev/null @@ -1,59 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-debug.h" - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS -ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t iwl4965_ucode_general_stats_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos); -#else -static ssize_t -iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - return 0; -} -static ssize_t -iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - return 0; -} -static ssize_t -iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - return 0; -} -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c deleted file mode 100644 index cb9baab1ff7..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c +++ /dev/null @@ -1,154 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/init.h> - -#include <net/mac80211.h> - -#include "iwl-commands.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-debug.h" -#include "iwl-4965.h" -#include "iwl-io.h" - -/****************************************************************************** - * - * EEPROM related functions - * -******************************************************************************/ - -/* - * The device's EEPROM semaphore prevents conflicts between driver and uCode - * when accessing the EEPROM; each access is a series of pulses to/from the - * EEPROM chip, not a single event, so even reads could conflict if they - * weren't arbitrated by the semaphore. - */ -int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv) -{ - u16 count; - int ret; - - for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { - /* Request semaphore */ - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - - /* See if we got it */ - ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - EEPROM_SEM_TIMEOUT); - if (ret >= 0) { - IWL_DEBUG_IO(priv, - "Acquired semaphore after %d tries.\n", - count+1); - return ret; - } - } - - return ret; -} - -void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv) -{ - iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - -} - -int iwl4965_eeprom_check_version(struct iwl_priv *priv) -{ - u16 eeprom_ver; - u16 calib_ver; - - eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); - calib_ver = iwl_legacy_eeprom_query16(priv, - EEPROM_4965_CALIB_VERSION_OFFSET); - - if (eeprom_ver < priv->cfg->eeprom_ver || - calib_ver < priv->cfg->eeprom_calib_ver) - goto err; - - IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", - eeprom_ver, calib_ver); - - return 0; -err: - IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " - "CALIB=0x%x < 0x%x\n", - eeprom_ver, priv->cfg->eeprom_ver, - calib_ver, priv->cfg->eeprom_calib_ver); - return -EINVAL; - -} - -void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) -{ - const u8 *addr = iwl_legacy_eeprom_query_addr(priv, - EEPROM_MAC_ADDRESS); - memcpy(mac, addr, ETH_ALEN); -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h deleted file mode 100644 index fc6fa2886d9..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h +++ /dev/null @@ -1,811 +0,0 @@ -/****************************************************************************** - * 
- * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -/* - * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. - * Use iwl-commands.h for uCode API definitions. - * Use iwl-dev.h for driver implementation definitions. - */ - -#ifndef __iwl_4965_hw_h__ -#define __iwl_4965_hw_h__ - -#include "iwl-fh.h" - -/* EEPROM */ -#define IWL4965_EEPROM_IMG_SIZE 1024 - -/* - * uCode queue management definitions ... - * The first queue used for block-ack aggregation is #7 (4965 only). - * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. 
- */ -#define IWL49_FIRST_AMPDU_QUEUE 7 - -/* Sizes and addresses for instruction and data memory (SRAM) in - * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ -#define IWL49_RTC_INST_LOWER_BOUND (0x000000) -#define IWL49_RTC_INST_UPPER_BOUND (0x018000) - -#define IWL49_RTC_DATA_LOWER_BOUND (0x800000) -#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) - -#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \ - IWL49_RTC_INST_LOWER_BOUND) -#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \ - IWL49_RTC_DATA_LOWER_BOUND) - -#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE -#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE - -/* Size of uCode instruction memory in bootstrap state machine */ -#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE - -static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) -{ - return (addr >= IWL49_RTC_DATA_LOWER_BOUND) && - (addr < IWL49_RTC_DATA_UPPER_BOUND); -} - -/********************* START TEMPERATURE *************************************/ - -/** - * 4965 temperature calculation. - * - * The driver must calculate the device temperature before calculating - * a txpower setting (amplifier gain is temperature dependent). The - * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration - * values used for the life of the driver, and one of which (R4) is the - * real-time temperature indicator. - * - * uCode provides all 4 values to the driver via the "initialize alive" - * notification (see struct iwl4965_init_alive_resp). After the runtime uCode - * image loads, uCode updates the R4 value via statistics notifications - * (see STATISTICS_NOTIFICATION), which occur after each received beacon - * when associated, or can be requested via REPLY_STATISTICS_CMD. - * - * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver - * must sign-extend to 32 bits before applying formula below. - * - * Formula: - * - * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 - * - * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is - * an additional correction, which should be centered around 0 degrees - * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for - * centering the 97/100 correction around 0 degrees K. - * - * Add 273 to Kelvin value to find degrees Celsius, for comparing current - * temperature with factory-measured temperatures when calculating txpower - * settings. - */ -#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 -#define TEMPERATURE_CALIB_A_VAL 259 - -/* Limit range of calculated temperature to be between these Kelvin values */ -#define IWL_TX_POWER_TEMPERATURE_MIN (263) -#define IWL_TX_POWER_TEMPERATURE_MAX (410) - -#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ - (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \ - ((t) > IWL_TX_POWER_TEMPERATURE_MAX)) - -/********************* END TEMPERATURE ***************************************/ - -/********************* START TXPOWER *****************************************/ - -/** - * 4965 txpower calculations rely on information from three sources: - * - * 1) EEPROM - * 2) "initialize" alive notification - * 3) statistics notifications - * - * EEPROM data consists of: - * - * 1) Regulatory information (max txpower and channel usage flags) is provided - * separately for each channel that can possibly supported by 4965. - * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz - * (legacy) channels. - * - * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom - * for locations in EEPROM. 
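For illustration, the temperature formula documented in the removed iwl-4965-hw.h comments above (degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8, with R4 sign-extended from 23 bits) can be sketched as a standalone helper. The clamping shown here is only one possible policy and the removed driver's out-of-range handling may differ; names are illustrative:

#include <stdint.h>

#define TEMPERATURE_CALIB_A_VAL		259
#define TEMPERATURE_CALIB_KELVIN_OFFSET	8
#define IWL_TX_POWER_TEMPERATURE_MIN	263
#define IWL_TX_POWER_TEMPERATURE_MAX	410

/* Sign-extend the 23-bit R4 reading (sign bit is bit 22) to 32 bits. */
static int32_t sign_extend_23(uint32_t v)
{
	return (int32_t)(v << 9) >> 9;
}

/* Illustrative only: Kelvin temperature from the R1..R4 calibration values. */
static int32_t temp_kelvin(int32_t r1, int32_t r2, int32_t r3, uint32_t r4_raw)
{
	int32_t r4 = sign_extend_23(r4_raw);
	int64_t num;
	int32_t t;

	if (r3 == r1)			/* guard against bogus calibration data */
		return IWL_TX_POWER_TEMPERATURE_MIN;

	/* 64-bit intermediate so the product cannot overflow */
	num = (int64_t)97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2);
	t = (int32_t)(num / (r3 - r1) / 100) + TEMPERATURE_CALIB_KELVIN_OFFSET;

	if (t < IWL_TX_POWER_TEMPERATURE_MIN)
		t = IWL_TX_POWER_TEMPERATURE_MIN;
	if (t > IWL_TX_POWER_TEMPERATURE_MAX)
		t = IWL_TX_POWER_TEMPERATURE_MAX;
	return t;
}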
- * - * 2) Factory txpower calibration information is provided separately for - * sub-bands of contiguous channels. 2.4GHz has just one sub-band, - * but 5 GHz has several sub-bands. - * - * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided. - * - * See struct iwl4965_eeprom_calib_info (and the tree of structures - * contained within it) for format, and struct iwl4965_eeprom for - * locations in EEPROM. - * - * "Initialization alive" notification (see struct iwl4965_init_alive_resp) - * consists of: - * - * 1) Temperature calculation parameters. - * - * 2) Power supply voltage measurement. - * - * 3) Tx gain compensation to balance 2 transmitters for MIMO use. - * - * Statistics notifications deliver: - * - * 1) Current values for temperature param R4. - */ - -/** - * To calculate a txpower setting for a given desired target txpower, channel, - * modulation bit rate, and transmitter chain (4965 has 2 transmitters to - * support MIMO and transmit diversity), driver must do the following: - * - * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel. - * Do not exceed regulatory limit; reduce target txpower if necessary. - * - * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), - * 2 transmitters will be used simultaneously; driver must reduce the - * regulatory limit by 3 dB (half-power) for each transmitter, so the - * combined total output of the 2 transmitters is within regulatory limits. - * - * - * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by - * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]); - * reduce target txpower if necessary. - * - * Backoff values below are in 1/2 dB units (equivalent to steps in - * txpower gain tables): - * - * OFDM 6 - 36 MBit: 10 steps (5 dB) - * OFDM 48 MBit: 15 steps (7.5 dB) - * OFDM 54 MBit: 17 steps (8.5 dB) - * OFDM 60 MBit: 20 steps (10 dB) - * CCK all rates: 10 steps (5 dB) - * - * Backoff values apply to saturation txpower on a per-transmitter basis; - * when using MIMO (2 transmitters), each transmitter uses the same - * saturation level provided in EEPROM, and the same backoff values; - * no reduction (such as with regulatory txpower limits) is required. - * - * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel - * widths and 40 Mhz (.11n HT40) channel widths; there is no separate - * factory measurement for ht40 channels. - * - * The result of this step is the final target txpower. The rest of - * the steps figure out the proper settings for the device to achieve - * that target txpower. - * - * - * 3) Determine (EEPROM) calibration sub band for the target channel, by - * comparing against first and last channels in each sub band - * (see struct iwl4965_eeprom_calib_subband_info). - * - * - * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, - * referencing the 2 factory-measured (sample) channels within the sub band. - * - * Interpolation is based on difference between target channel's frequency - * and the sample channels' frequencies. Since channel numbers are based - * on frequency (5 MHz between each channel number), this is equivalent - * to interpolating based on channel number differences. - * - * Note that the sample channels may or may not be the channels at the - * edges of the sub band. The target channel may be "outside" of the - * span of the sampled channels. 
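Step 4 above amounts to ordinary linear interpolation over channel numbers, with extrapolation allowed when the target channel lies outside the two sampled channels. A minimal standalone sketch, with hypothetical names and integer arithmetic as a simplification:

#include <stdint.h>

/* Illustrative only: interpolate a factory-calibration value between the two
 * sample channels of a sub-band, using channel number as the x axis. */
static int32_t interpolate_calib(int32_t ch, int32_t ch1, int32_t val1,
				 int32_t ch2, int32_t val2)
{
	if (ch1 == ch2)
		return val1;
	/* May extrapolate: ch is allowed to fall outside [ch1, ch2]. */
	return val1 + ((ch - ch1) * (val2 - val1)) / (ch2 - ch1);
}

For example, a target channel 40 between samples taken at channels 36 and 44 with measured gain indexes 16 and 12 interpolates to 14.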
- * - * Driver may choose the pair (for 2 Tx chains) of measurements (see - * struct iwl4965_eeprom_calib_ch_info) for which the actual measured - * txpower comes closest to the desired txpower. Usually, though, - * the middle set of measurements is closest to the regulatory limits, - * and is therefore a good choice for all txpower calculations (this - * assumes that high accuracy is needed for maximizing legal txpower, - * while lower txpower configurations do not need as much accuracy). - * - * Driver should interpolate both members of the chosen measurement pair, - * i.e. for both Tx chains (radio transmitters), unless the driver knows - * that only one of the chains will be used (e.g. only one tx antenna - * connected, but this should be unusual). The rate scaling algorithm - * switches antennas to find best performance, so both Tx chains will - * be used (although only one at a time) even for non-MIMO transmissions. - * - * Driver should interpolate factory values for temperature, gain table - * index, and actual power. The power amplifier detector values are - * not used by the driver. - * - * Sanity check: If the target channel happens to be one of the sample - * channels, the results should agree with the sample channel's - * measurements! - * - * - * 5) Find difference between desired txpower and (interpolated) - * factory-measured txpower. Using (interpolated) factory gain table index - * (shown elsewhere) as a starting point, adjust this index lower to - * increase txpower, or higher to decrease txpower, until the target - * txpower is reached. Each step in the gain table is 1/2 dB. - * - * For example, if factory measured txpower is 16 dBm, and target txpower - * is 13 dBm, add 6 steps to the factory gain index to reduce txpower - * by 3 dB. - * - * - * 6) Find difference between current device temperature and (interpolated) - * factory-measured temperature for sub-band. Factory values are in - * degrees Celsius. To calculate current temperature, see comments for - * "4965 temperature calculation". - * - * If current temperature is higher than factory temperature, driver must - * increase gain (lower gain table index), and vice verse. - * - * Temperature affects gain differently for different channels: - * - * 2.4 GHz all channels: 3.5 degrees per half-dB step - * 5 GHz channels 34-43: 4.5 degrees per half-dB step - * 5 GHz channels >= 44: 4.0 degrees per half-dB step - * - * NOTE: Temperature can increase rapidly when transmitting, especially - * with heavy traffic at high txpowers. Driver should update - * temperature calculations often under these conditions to - * maintain strong txpower in the face of rising temperature. - * - * - * 7) Find difference between current power supply voltage indicator - * (from "initialize alive") and factory-measured power supply voltage - * indicator (EEPROM). - * - * If the current voltage is higher (indicator is lower) than factory - * voltage, gain should be reduced (gain table index increased) by: - * - * (eeprom - current) / 7 - * - * If the current voltage is lower (indicator is higher) than factory - * voltage, gain should be increased (gain table index decreased) by: - * - * 2 * (current - eeprom) / 7 - * - * If number of index steps in either direction turns out to be > 2, - * something is wrong ... just use 0. - * - * NOTE: Voltage compensation is independent of band/channel. - * - * NOTE: "Initialize" uCode measures current voltage, which is assumed - * to be constant after this initial measurement. 
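The voltage-compensation rule in step 7 above condenses to one small helper. This is a sketch under the stated assumptions (a positive return value means the gain table index is increased, i.e. txpower is reduced); the removed driver's actual helper and symbol names may differ:

#include <stdint.h>

#define VOLTAGE_CODES_PER_03V 7		/* the "/ 7" divisor from the comment above */

/* Illustrative only: gain-index adjustment from the factory (EEPROM) and
 * current ("initialize alive") supply-voltage indicator codes. */
static int32_t voltage_compensation(int32_t eeprom_code, int32_t current_code)
{
	int32_t diff = eeprom_code - current_code;
	int32_t comp;

	if (diff > 0)		/* indicator dropped: voltage rose, reduce gain */
		comp = diff / VOLTAGE_CODES_PER_03V;
	else			/* indicator rose: voltage fell, increase gain */
		comp = 2 * diff / VOLTAGE_CODES_PER_03V;

	if (comp > 2 || comp < -2)	/* "> 2 steps: something is wrong, use 0" */
		comp = 0;
	return comp;
}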
Voltage - * compensation for txpower (number of steps in gain table) - * may be calculated once and used until the next uCode bootload. - * - * - * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), - * adjust txpower for each transmitter chain, so txpower is balanced - * between the two chains. There are 5 pairs of tx_atten[group][chain] - * values in "initialize alive", one pair for each of 5 channel ranges: - * - * Group 0: 5 GHz channel 34-43 - * Group 1: 5 GHz channel 44-70 - * Group 2: 5 GHz channel 71-124 - * Group 3: 5 GHz channel 125-200 - * Group 4: 2.4 GHz all channels - * - * Add the tx_atten[group][chain] value to the index for the target chain. - * The values are signed, but are in pairs of 0 and a non-negative number, - * so as to reduce gain (if necessary) of the "hotter" channel. This - * avoids any need to double-check for regulatory compliance after - * this step. - * - * - * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation - * value to the index: - * - * Hardware rev B: 9 steps (4.5 dB) - * Hardware rev C: 5 steps (2.5 dB) - * - * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, - * bits [3:2], 1 = B, 2 = C. - * - * NOTE: This compensation is in addition to any saturation backoff that - * might have been applied in an earlier step. - * - * - * 10) Select the gain table, based on band (2.4 vs 5 GHz). - * - * Limit the adjusted index to stay within the table! - * - * - * 11) Read gain table entries for DSP and radio gain, place into appropriate - * location(s) in command (struct iwl4965_txpowertable_cmd). - */ - -/** - * When MIMO is used (2 transmitters operating simultaneously), driver should - * limit each transmitter to deliver a max of 3 dB below the regulatory limit - * for the device. That is, use half power for each transmitter, so total - * txpower is within regulatory limits. - * - * The value "6" represents number of steps in gain table to reduce power 3 dB. - * Each step is 1/2 dB. - */ -#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) - -/** - * CCK gain compensation. - * - * When calculating txpowers for CCK, after making sure that the target power - * is within regulatory and saturation limits, driver must additionally - * back off gain by adding these values to the gain table index. - * - * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, - * bits [3:2], 1 = B, 2 = C. - */ -#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9) -#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5) - -/* - * 4965 power supply voltage compensation for txpower - */ -#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7) - -/** - * Gain tables. - * - * The following tables contain pair of values for setting txpower, i.e. - * gain settings for the output of the device's digital signal processor (DSP), - * and for the analog gain structure of the transmitter. - * - * Each entry in the gain tables represents a step of 1/2 dB. Note that these - * are *relative* steps, not indications of absolute output power. Output - * power varies with temperature, voltage, and channel frequency, and also - * requires consideration of average power (to satisfy regulatory constraints), - * and peak power (to avoid distortion of the output signal). - * - * Each entry contains two values: - * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained - * linear value that multiplies the output of the digital signal processor, - * before being sent to the analog radio. - * 2) Radio gain. 
This sets the analog gain of the radio Tx path. - * It is a coarser setting, and behaves in a logarithmic (dB) fashion. - * - * EEPROM contains factory calibration data for txpower. This maps actual - * measured txpower levels to gain settings in the "well known" tables - * below ("well-known" means here that both factory calibration *and* the - * driver work with the same table). - * - * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table - * has an extension (into negative indexes), in case the driver needs to - * boost power setting for high device temperatures (higher than would be - * present during factory calibration). A 5 Ghz EEPROM index of "40" - * corresponds to the 49th entry in the table used by the driver. - */ -#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */ -#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */ - -/** - * 2.4 GHz gain table - * - * Index Dsp gain Radio gain - * 0 110 0x3f (highest gain) - * 1 104 0x3f - * 2 98 0x3f - * 3 110 0x3e - * 4 104 0x3e - * 5 98 0x3e - * 6 110 0x3d - * 7 104 0x3d - * 8 98 0x3d - * 9 110 0x3c - * 10 104 0x3c - * 11 98 0x3c - * 12 110 0x3b - * 13 104 0x3b - * 14 98 0x3b - * 15 110 0x3a - * 16 104 0x3a - * 17 98 0x3a - * 18 110 0x39 - * 19 104 0x39 - * 20 98 0x39 - * 21 110 0x38 - * 22 104 0x38 - * 23 98 0x38 - * 24 110 0x37 - * 25 104 0x37 - * 26 98 0x37 - * 27 110 0x36 - * 28 104 0x36 - * 29 98 0x36 - * 30 110 0x35 - * 31 104 0x35 - * 32 98 0x35 - * 33 110 0x34 - * 34 104 0x34 - * 35 98 0x34 - * 36 110 0x33 - * 37 104 0x33 - * 38 98 0x33 - * 39 110 0x32 - * 40 104 0x32 - * 41 98 0x32 - * 42 110 0x31 - * 43 104 0x31 - * 44 98 0x31 - * 45 110 0x30 - * 46 104 0x30 - * 47 98 0x30 - * 48 110 0x6 - * 49 104 0x6 - * 50 98 0x6 - * 51 110 0x5 - * 52 104 0x5 - * 53 98 0x5 - * 54 110 0x4 - * 55 104 0x4 - * 56 98 0x4 - * 57 110 0x3 - * 58 104 0x3 - * 59 98 0x3 - * 60 110 0x2 - * 61 104 0x2 - * 62 98 0x2 - * 63 110 0x1 - * 64 104 0x1 - * 65 98 0x1 - * 66 110 0x0 - * 67 104 0x0 - * 68 98 0x0 - * 69 97 0 - * 70 96 0 - * 71 95 0 - * 72 94 0 - * 73 93 0 - * 74 92 0 - * 75 91 0 - * 76 90 0 - * 77 89 0 - * 78 88 0 - * 79 87 0 - * 80 86 0 - * 81 85 0 - * 82 84 0 - * 83 83 0 - * 84 82 0 - * 85 81 0 - * 86 80 0 - * 87 79 0 - * 88 78 0 - * 89 77 0 - * 90 76 0 - * 91 75 0 - * 92 74 0 - * 93 73 0 - * 94 72 0 - * 95 71 0 - * 96 70 0 - * 97 69 0 - * 98 68 0 - */ - -/** - * 5 GHz gain table - * - * Index Dsp gain Radio gain - * -9 123 0x3F (highest gain) - * -8 117 0x3F - * -7 110 0x3F - * -6 104 0x3F - * -5 98 0x3F - * -4 110 0x3E - * -3 104 0x3E - * -2 98 0x3E - * -1 110 0x3D - * 0 104 0x3D - * 1 98 0x3D - * 2 110 0x3C - * 3 104 0x3C - * 4 98 0x3C - * 5 110 0x3B - * 6 104 0x3B - * 7 98 0x3B - * 8 110 0x3A - * 9 104 0x3A - * 10 98 0x3A - * 11 110 0x39 - * 12 104 0x39 - * 13 98 0x39 - * 14 110 0x38 - * 15 104 0x38 - * 16 98 0x38 - * 17 110 0x37 - * 18 104 0x37 - * 19 98 0x37 - * 20 110 0x36 - * 21 104 0x36 - * 22 98 0x36 - * 23 110 0x35 - * 24 104 0x35 - * 25 98 0x35 - * 26 110 0x34 - * 27 104 0x34 - * 28 98 0x34 - * 29 110 0x33 - * 30 104 0x33 - * 31 98 0x33 - * 32 110 0x32 - * 33 104 0x32 - * 34 98 0x32 - * 35 110 0x31 - * 36 104 0x31 - * 37 98 0x31 - * 38 110 0x30 - * 39 104 0x30 - * 40 98 0x30 - * 41 110 0x25 - * 42 104 0x25 - * 43 98 0x25 - * 44 110 0x24 - * 45 104 0x24 - * 46 98 0x24 - * 47 110 0x23 - * 48 104 0x23 - * 49 98 0x23 - * 50 110 0x22 - * 51 104 0x18 - * 52 98 0x18 - * 53 110 0x17 - * 54 104 0x17 - * 55 98 0x17 - * 56 110 0x16 - * 57 104 0x16 - * 58 98 0x16 - * 59 110 0x15 - * 60 104 
0x15 - * 61 98 0x15 - * 62 110 0x14 - * 63 104 0x14 - * 64 98 0x14 - * 65 110 0x13 - * 66 104 0x13 - * 67 98 0x13 - * 68 110 0x12 - * 69 104 0x08 - * 70 98 0x08 - * 71 110 0x07 - * 72 104 0x07 - * 73 98 0x07 - * 74 110 0x06 - * 75 104 0x06 - * 76 98 0x06 - * 77 110 0x05 - * 78 104 0x05 - * 79 98 0x05 - * 80 110 0x04 - * 81 104 0x04 - * 82 98 0x04 - * 83 110 0x03 - * 84 104 0x03 - * 85 98 0x03 - * 86 110 0x02 - * 87 104 0x02 - * 88 98 0x02 - * 89 110 0x01 - * 90 104 0x01 - * 91 98 0x01 - * 92 110 0x00 - * 93 104 0x00 - * 94 98 0x00 - * 95 93 0x00 - * 96 88 0x00 - * 97 83 0x00 - * 98 78 0x00 - */ - - -/** - * Sanity checks and default values for EEPROM regulatory levels. - * If EEPROM values fall outside MIN/MAX range, use default values. - * - * Regulatory limits refer to the maximum average txpower allowed by - * regulatory agencies in the geographies in which the device is meant - * to be operated. These limits are SKU-specific (i.e. geography-specific), - * and channel-specific; each channel has an individual regulatory limit - * listed in the EEPROM. - * - * Units are in half-dBm (i.e. "34" means 17 dBm). - */ -#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34) -#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34) -#define IWL_TX_POWER_REGULATORY_MIN (0) -#define IWL_TX_POWER_REGULATORY_MAX (34) - -/** - * Sanity checks and default values for EEPROM saturation levels. - * If EEPROM values fall outside MIN/MAX range, use default values. - * - * Saturation is the highest level that the output power amplifier can produce - * without significant clipping distortion. This is a "peak" power level. - * Different types of modulation (i.e. various "rates", and OFDM vs. CCK) - * require differing amounts of backoff, relative to their average power output, - * in order to avoid clipping distortion. - * - * Driver must make sure that it is violating neither the saturation limit, - * nor the regulatory limit, when calculating Tx power settings for various - * rates. - * - * Units are in half-dBm (i.e. "38" means 19 dBm). - */ -#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38) -#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38) -#define IWL_TX_POWER_SATURATION_MIN (20) -#define IWL_TX_POWER_SATURATION_MAX (50) - -/** - * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance) - * and thermal Txpower calibration. - * - * When calculating txpower, driver must compensate for current device - * temperature; higher temperature requires higher gain. Driver must calculate - * current temperature (see "4965 temperature calculation"), then compare vs. - * factory calibration temperature in EEPROM; if current temperature is higher - * than factory temperature, driver must *increase* gain by proportions shown - * in table below. If current temperature is lower than factory, driver must - * *decrease* gain. - * - * Different frequency ranges require different compensation, as shown below. - */ -/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */ -#define CALIB_IWL_TX_ATTEN_GR1_FCH 34 -#define CALIB_IWL_TX_ATTEN_GR1_LCH 43 - -/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */ -#define CALIB_IWL_TX_ATTEN_GR2_FCH 44 -#define CALIB_IWL_TX_ATTEN_GR2_LCH 70 - -/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */ -#define CALIB_IWL_TX_ATTEN_GR3_FCH 71 -#define CALIB_IWL_TX_ATTEN_GR3_LCH 124 - -/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */ -#define CALIB_IWL_TX_ATTEN_GR4_FCH 125 -#define CALIB_IWL_TX_ATTEN_GR4_LCH 200 - -/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. 
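For illustration, the back-off arithmetic described in steps 8-10 of the deleted txpower comment reduces to a few integer additions on a gain-table index followed by a clamp. A minimal stand-alone sketch (step constants are the IWL_TX_POWER_* values listed above; the starting index, clamp range and helper name are illustrative, not the driver's code). Each step is 1/2 dB and a larger index means lower output power:

#include <stdio.h>

#define MIMO_COMP_STEPS       6   /* 3 dB back-off, 1/2 dB per step */
#define CCK_COMP_STEPS_REV_B  9   /* 4.5 dB */
#define CCK_COMP_STEPS_REV_C  5   /* 2.5 dB */
#define GAIN_IDX_MIN_24GHZ    0   /* 2.4 GHz table runs 0 .. 98 above */
#define GAIN_IDX_MAX_24GHZ   98

/* Apply MIMO and CCK back-offs as additional 1/2 dB steps, then clamp. */
static int adjust_gain_index(int idx, int is_mimo, int is_cck, int hw_rev_b)
{
        if (is_mimo)
                idx += MIMO_COMP_STEPS;
        if (is_cck)
                idx += hw_rev_b ? CCK_COMP_STEPS_REV_B : CCK_COMP_STEPS_REV_C;
        if (idx < GAIN_IDX_MIN_24GHZ)
                idx = GAIN_IDX_MIN_24GHZ;
        if (idx > GAIN_IDX_MAX_24GHZ)
                idx = GAIN_IDX_MAX_24GHZ;
        return idx;
}

int main(void)
{
        printf("OFDM SISO : %d\n", adjust_gain_index(40, 0, 0, 1)); /* 40 */
        printf("CCK, rev B: %d\n", adjust_gain_index(40, 0, 1, 1)); /* 49 */
        printf("MIMO      : %d\n", adjust_gain_index(40, 1, 0, 1)); /* 46 */
        return 0;
}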
*/ -#define CALIB_IWL_TX_ATTEN_GR5_FCH 1 -#define CALIB_IWL_TX_ATTEN_GR5_LCH 20 - -enum { - CALIB_CH_GROUP_1 = 0, - CALIB_CH_GROUP_2 = 1, - CALIB_CH_GROUP_3 = 2, - CALIB_CH_GROUP_4 = 3, - CALIB_CH_GROUP_5 = 4, - CALIB_CH_GROUP_MAX -}; - -/********************* END TXPOWER *****************************************/ - - -/** - * Tx/Rx Queues - * - * Most communication between driver and 4965 is via queues of data buffers. - * For example, all commands that the driver issues to device's embedded - * controller (uCode) are via the command queue (one of the Tx queues). All - * uCode command responses/replies/notifications, including Rx frames, are - * conveyed from uCode to driver via the Rx queue. - * - * Most support for these queues, including handshake support, resides in - * structures in host DRAM, shared between the driver and the device. When - * allocating this memory, the driver must make sure that data written by - * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's - * cache memory), so DRAM and cache are consistent, and the device can - * immediately see changes made by the driver. - * - * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via - * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array - * in DRAM containing 256 Transmit Frame Descriptors (TFDs). - */ -#define IWL49_NUM_FIFOS 7 -#define IWL49_CMD_FIFO_NUM 4 -#define IWL49_NUM_QUEUES 16 -#define IWL49_NUM_AMPDU_QUEUES 8 - - -/** - * struct iwl4965_schedq_bc_tbl - * - * Byte Count table - * - * Each Tx queue uses a byte-count table containing 320 entries: - * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that - * duplicate the first 64 entries (to avoid wrap-around within a Tx window; - * max Tx window is 64 TFDs). - * - * When driver sets up a new TFD, it must also enter the total byte count - * of the frame to be transmitted into the corresponding entry in the byte - * count table for the chosen Tx queue. If the TFD index is 0-63, the driver - * must duplicate the byte count entry in corresponding index 256-319. - * - * padding puts each byte count table on a 1024-byte boundary; - * 4965 assumes tables are separated by 1024 bytes. - */ -struct iwl4965_scd_bc_tbl { - __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; - u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; -} __packed; - - -#define IWL4965_RTC_INST_LOWER_BOUND (0x000000) - -/* RSSI to dBm */ -#define IWL4965_RSSI_OFFSET 44 - -/* PCI registers */ -#define PCI_CFG_RETRY_TIMEOUT 0x041 - -/* PCI register values */ -#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 -#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 - -#define IWL4965_DEFAULT_TX_RETRY 15 - -/* EEPROM */ -#define IWL4965_FIRST_AMPDU_QUEUE 10 - - -#endif /* !__iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c deleted file mode 100644 index 6862fdcaee6..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c +++ /dev/null @@ -1,73 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> - -#include "iwl-commands.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-4965-led.h" - -/* Send led command */ -static int -iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_LEDS_CMD, - .len = sizeof(struct iwl_led_cmd), - .data = led_cmd, - .flags = CMD_ASYNC, - .callback = NULL, - }; - u32 reg; - - reg = iwl_read32(priv, CSR_LED_REG); - if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) - iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); - - return iwl_legacy_send_cmd(priv, &cmd); -} - -/* Set led register off */ -void iwl4965_led_enable(struct iwl_priv *priv) -{ - iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); -} - -const struct iwl_led_ops iwl4965_led_ops = { - .cmd = iwl4965_send_led_cmd, -}; diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h deleted file mode 100644 index 5ed3615fc33..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h +++ /dev/null @@ -1,33 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_4965_led_h__ -#define __iwl_4965_led_h__ - -extern const struct iwl_led_ops iwl4965_led_ops; -void iwl4965_led_enable(struct iwl_priv *priv); - -#endif /* __iwl_4965_led_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c deleted file mode 100644 index 2be6d9e3b01..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c +++ /dev/null @@ -1,1194 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include <linux/etherdevice.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/sched.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -#include "iwl-4965-hw.h" -#include "iwl-4965.h" -#include "iwl-sta.h" - -void iwl4965_check_abort_status(struct iwl_priv *priv, - u8 frame_count, u32 status) -{ - if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { - IWL_ERR(priv, "Tx flush command to flush out all frames\n"); - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) - queue_work(priv->workqueue, &priv->tx_flush); - } -} - -/* - * EEPROM - */ -struct iwl_mod_params iwl4965_mod_params = { - .amsdu_size_8K = 1, - .restart_fw = 1, - /* the rest are 0 by default */ -}; - -void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - unsigned long flags; - int i; - spin_lock_irqsave(&rxq->lock, flags); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { - /* In the reset function, these buffers may have been allocated - * to an SKB, so we need to unmap and free potential storage */ - if (rxq->pool[i].page != NULL) { - pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __iwl_legacy_free_pages(priv, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - } - - for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - 
rxq->write_actual = 0; - rxq->free_count = 0; - spin_unlock_irqrestore(&rxq->lock, flags); -} - -int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - u32 rb_size; - const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ - u32 rb_timeout = 0; - - if (priv->cfg->mod_params->amsdu_size_8K) - rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; - else - rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; - - /* Stop Rx DMA */ - iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - - /* Reset driver's Rx queue write index */ - iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); - - /* Tell device where to find RBD circular buffer in DRAM */ - iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, - (u32)(rxq->bd_dma >> 8)); - - /* Tell device where in DRAM to update its Rx status */ - iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, - rxq->rb_stts_dma >> 4); - - /* Enable Rx DMA - * Direct rx interrupts to hosts - * Rx buffer size 4 or 8k - * RB timeout 0x10 - * 256 RBDs - */ - iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | - FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | - FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | - rb_size| - (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| - (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); - - /* Set interrupt coalescing timer to default (2048 usecs) */ - iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); - - return 0; -} - -static void iwl4965_set_pwr_vmain(struct iwl_priv *priv) -{ -/* - * (for documentation purposes) - * to set power to V_AUX, do: - - if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) - iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - */ - - iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); -} - -int iwl4965_hw_nic_init(struct iwl_priv *priv) -{ - unsigned long flags; - struct iwl_rx_queue *rxq = &priv->rxq; - int ret; - - /* nic_init */ - spin_lock_irqsave(&priv->lock, flags); - priv->cfg->ops->lib->apm_ops.init(priv); - - /* Set interrupt coalescing calibration timer to default (512 usecs) */ - iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); - - spin_unlock_irqrestore(&priv->lock, flags); - - iwl4965_set_pwr_vmain(priv); - - priv->cfg->ops->lib->apm_ops.config(priv); - - /* Allocate the RX queue, or reset if it is already allocated */ - if (!rxq->bd) { - ret = iwl_legacy_rx_queue_alloc(priv); - if (ret) { - IWL_ERR(priv, "Unable to initialize Rx queue\n"); - return -ENOMEM; - } - } else - iwl4965_rx_queue_reset(priv, rxq); - - iwl4965_rx_replenish(priv); - - iwl4965_rx_init(priv, rxq); - - spin_lock_irqsave(&priv->lock, flags); - - rxq->need_update = 1; - iwl_legacy_rx_queue_update_write_ptr(priv, rxq); - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Allocate or reset and init all Tx and Command queues */ - if (!priv->txq) { - ret = iwl4965_txq_ctx_alloc(priv); - if (ret) - return ret; - } else - iwl4965_txq_ctx_reset(priv); - - set_bit(STATUS_INIT, &priv->status); - - return 0; -} - -/** - * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr - */ -static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv, - dma_addr_t dma_addr) -{ - return cpu_to_le32((u32)(dma_addr >> 8)); -} - -/** - * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to 
be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' index forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. - */ -void iwl4965_rx_queue_restock(struct iwl_priv *priv) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct list_head *element; - struct iwl_rx_mem_buffer *rxb; - unsigned long flags; - - spin_lock_irqsave(&rxq->lock, flags); - while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { - /* The overwritten rxb must be a used one */ - rxb = rxq->queue[rxq->write]; - BUG_ON(rxb && rxb->page); - - /* Get next free Rx buffer, remove from free list */ - element = rxq->rx_free.next; - rxb = list_entry(element, struct iwl_rx_mem_buffer, list); - list_del(element); - - /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, - rxb->page_dma); - rxq->queue[rxq->write] = rxb; - rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; - rxq->free_count--; - } - spin_unlock_irqrestore(&rxq->lock, flags); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(priv->workqueue, &priv->rx_replenish); - - - /* If we've added more space for the firmware to place data, tell it. - * Increment device's write pointer in multiples of 8. */ - if (rxq->write_actual != (rxq->write & ~0x7)) { - spin_lock_irqsave(&rxq->lock, flags); - rxq->need_update = 1; - spin_unlock_irqrestore(&rxq->lock, flags); - iwl_legacy_rx_queue_update_write_ptr(priv, rxq); - } -} - -/** - * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free - * - * When moving to rx_free an SKB is allocated for the slot. - * - * Also restock the Rx queue via iwl_rx_queue_restock. - * This is called as a scheduled work item (except for during initialization) - */ -static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct list_head *element; - struct iwl_rx_mem_buffer *rxb; - struct page *page; - unsigned long flags; - gfp_t gfp_mask = priority; - - while (1) { - spin_lock_irqsave(&rxq->lock, flags); - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - return; - } - spin_unlock_irqrestore(&rxq->lock, flags); - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (priv->hw_params.rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(priv, "alloc_pages failed, " - "order: %d\n", - priv->hw_params.rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(priv, - "Failed to alloc_pages with %s. " - "Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? 
- "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ - return; - } - - spin_lock_irqsave(&rxq->lock, flags); - - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - __free_pages(page, priv->hw_params.rx_page_order); - return; - } - element = rxq->rx_used.next; - rxb = list_entry(element, struct iwl_rx_mem_buffer, list); - list_del(element); - - spin_unlock_irqrestore(&rxq->lock, flags); - - BUG_ON(rxb->page); - rxb->page = page; - /* Get physical address of the RB */ - rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - spin_lock_irqsave(&rxq->lock, flags); - - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - priv->alloc_rxb_page++; - - spin_unlock_irqrestore(&rxq->lock, flags); - } -} - -void iwl4965_rx_replenish(struct iwl_priv *priv) -{ - unsigned long flags; - - iwl4965_rx_allocate(priv, GFP_KERNEL); - - spin_lock_irqsave(&priv->lock, flags); - iwl4965_rx_queue_restock(priv); - spin_unlock_irqrestore(&priv->lock, flags); -} - -void iwl4965_rx_replenish_now(struct iwl_priv *priv) -{ - iwl4965_rx_allocate(priv, GFP_ATOMIC); - - iwl4965_rx_queue_restock(priv); -} - -/* Assumes that the skb field of the buffers in 'pool' is kept accurate. - * If an SKB has been detached, the POOL needs to have its SKB set to NULL - * This free routine walks the list of POOL entries and if SKB is set to - * non NULL it is unmapped and freed - */ -void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - int i; - for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { - if (rxq->pool[i].page != NULL) { - pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __iwl_legacy_free_pages(priv, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - } - - dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); - dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - rxq->bd = NULL; - rxq->rb_stts = NULL; -} - -int iwl4965_rxq_stop(struct iwl_priv *priv) -{ - - /* stop Rx DMA */ - iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, - FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); - - return 0; -} - -int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) -{ - int idx = 0; - int band_offset = 0; - - /* HT rate format: mac80211 wants an MCS number, which is just LSB */ - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = (rate_n_flags & 0xff); - return idx; - /* Legacy rate format, search for match in table */ - } else { - if (band == IEEE80211_BAND_5GHZ) - band_offset = IWL_FIRST_OFDM_RATE; - for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) - if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF)) - return idx - band_offset; - } - - return -1; -} - -static int iwl4965_calc_rssi(struct iwl_priv *priv, - struct iwl_rx_phy_res *rx_resp) -{ - /* data from PHY/DSP regarding signal strength, etc., - * contents are always there, not configurable by host. 
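Stepping back to the Rx-queue restock path deleted just above: it advances a circular write index one slot at a time, but only notifies the hardware in multiples of 8 slots. A stripped-down sketch of just that index handling (queue size matches RX_QUEUE_SIZE in the listing; structure and helper names are illustrative):

#include <stdio.h>

#define RXQ_SIZE 256                 /* power of two, as in the deleted code */
#define RXQ_MASK (RXQ_SIZE - 1)

struct rxq_idx {
        unsigned int write;          /* next slot the driver will fill */
        unsigned int write_actual;   /* last value handed to the device */
};

/* Claim one slot for a freshly allocated receive buffer. */
static void rxq_restock_one(struct rxq_idx *q)
{
        q->write = (q->write + 1) & RXQ_MASK;
}

/* Ring the doorbell only when 8 or more new slots are ready. */
static int rxq_need_doorbell(const struct rxq_idx *q)
{
        return q->write_actual != (q->write & ~0x7u);
}

int main(void)
{
        struct rxq_idx q = { 0, 0 };
        for (int i = 0; i < 10; i++) {
                rxq_restock_one(&q);
                if (rxq_need_doorbell(&q)) {
                        q.write_actual = q.write & ~0x7u;
                        printf("doorbell at write=%u actual=%u\n",
                               q.write, q.write_actual);
                }
        }
        return 0;
}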
*/ - struct iwl4965_rx_non_cfg_phy *ncphy = - (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; - u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) - >> IWL49_AGC_DB_POS; - - u32 valid_antennae = - (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) - >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; - u8 max_rssi = 0; - u32 i; - - /* Find max rssi among 3 possible receivers. - * These values are measured by the digital signal processor (DSP). - * They should stay fairly constant even as the signal strength varies, - * if the radio's automatic gain control (AGC) is working right. - * AGC value (see below) will provide the "interesting" info. */ - for (i = 0; i < 3; i++) - if (valid_antennae & (1 << i)) - max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); - - IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", - ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], - max_rssi, agc); - - /* dBm = max_rssi dB - agc dB - constant. - * Higher AGC (higher radio gain) means lower signal. */ - return max_rssi - agc - IWL4965_RSSI_OFFSET; -} - - -static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) -{ - u32 decrypt_out = 0; - - if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == - RX_RES_STATUS_STATION_FOUND) - decrypt_out |= (RX_RES_STATUS_STATION_FOUND | - RX_RES_STATUS_NO_STATION_INFO_MISMATCH); - - decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); - - /* packet was not encrypted */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_NONE) - return decrypt_out; - - /* packet was encrypted with unknown alg */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_ERR) - return decrypt_out; - - /* decryption was not done in HW */ - if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != - RX_MPDU_RES_STATUS_DEC_DONE_MSK) - return decrypt_out; - - switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { - - case RX_RES_STATUS_SEC_TYPE_CCMP: - /* alg is CCM: check MIC only */ - if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) - /* Bad MIC */ - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - - break; - - case RX_RES_STATUS_SEC_TYPE_TKIP: - if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { - /* Bad TTAK */ - decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; - break; - } - /* fall through if TTAK OK */ - default: - if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - break; - } - - IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", - decrypt_in, decrypt_out); - - return decrypt_out; -} - -static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u16 len, - u32 ampdu_status, - struct iwl_rx_mem_buffer *rxb, - struct ieee80211_rx_status *stats) -{ - struct sk_buff *skb; - __le16 fc = hdr->frame_control; - - /* We only process data packets if the interface is open */ - if (unlikely(!priv->is_open)) { - IWL_DEBUG_DROP_LIMIT(priv, - "Dropping packet while interface is not open.\n"); - return; - } - - /* In case of HW accelerated crypto and bad decryption, drop */ - if (!priv->cfg->mod_params->sw_crypto && - iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats)) - return; - - skb = dev_alloc_skb(128); - if (!skb) { - IWL_ERR(priv, "dev_alloc_skb failed\n"); - return; - } - - skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); - - iwl_legacy_update_stats(priv, false, fc, len); - 
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - - ieee80211_rx(priv->hw, skb); - priv->alloc_rxb_page--; - rxb->page = NULL; -} - -/* Called for REPLY_RX (legacy ABG frames), or - * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ -void iwl4965_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct ieee80211_hdr *header; - struct ieee80211_rx_status rx_status; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_rx_phy_res *phy_res; - __le32 rx_pkt_status; - struct iwl_rx_mpdu_res_start *amsdu; - u32 len; - u32 ampdu_status; - u32 rate_n_flags; - - /** - * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. - * REPLY_RX: physical layer info is in this buffer - * REPLY_RX_MPDU_CMD: physical layer info was sent in separate - * command and cached in priv->last_phy_res - * - * Here we set up local variables depending on which command is - * received. - */ - if (pkt->hdr.cmd == REPLY_RX) { - phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) - + phy_res->cfg_phy_cnt); - - len = le16_to_cpu(phy_res->byte_count); - rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + - phy_res->cfg_phy_cnt + len); - ampdu_status = le32_to_cpu(rx_pkt_status); - } else { - if (!priv->_4965.last_phy_res_valid) { - IWL_ERR(priv, "MPDU frame without cached PHY data\n"); - return; - } - phy_res = &priv->_4965.last_phy_res; - amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); - len = le16_to_cpu(amsdu->byte_count); - rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); - ampdu_status = iwl4965_translate_rx_status(priv, - le32_to_cpu(rx_pkt_status)); - } - - if ((unlikely(phy_res->cfg_phy_cnt > 20))) { - IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", - phy_res->cfg_phy_cnt); - return; - } - - if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || - !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { - IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", - le32_to_cpu(rx_pkt_status)); - return; - } - - /* This will be used in several places later */ - rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); - - /* rx_status carries information about the packet to mac80211 */ - rx_status.mactime = le64_to_cpu(phy_res->timestamp); - rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), - rx_status.band); - rx_status.rate_idx = - iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); - rx_status.flag = 0; - - /* TSF isn't reliable. In order to allow smooth user experience, - * this W/A doesn't propagate it to the mac80211 */ - /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ - - priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); - - /* Find max signal strength (dBm) among 3 antenna/receiver chains */ - rx_status.signal = iwl4965_calc_rssi(priv, phy_res); - - iwl_legacy_dbg_log_rx_data_frame(priv, len, header); - IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", - rx_status.signal, (unsigned long long)rx_status.mactime); - - /* - * "antenna number" - * - * It seems that the antenna field in the phy flags value - * is actually a bit field. This is undefined by radiotap, - * it wants an actual antenna number but I always get "7" - * for most legacy frames I receive indicating that the - * same frame was received on all three RX chains. 
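The signal-strength calculation deleted above boils down to: take the strongest of the per-chain DSP RSSI readings, subtract the AGC gain, and subtract a fixed hardware offset (IWL4965_RSSI_OFFSET, 44, from the header above). A small stand-alone sketch of that arithmetic with the field layout simplified:

#include <stdio.h>

#define RSSI_OFFSET 44    /* IWL4965_RSSI_OFFSET from the deleted header */

/* valid_mask: bit i set if receiver chain i reported a measurement. */
static int rssi_to_dbm(const unsigned char rssi[3], unsigned valid_mask,
                       unsigned agc_db)
{
        unsigned max_rssi = 0;

        for (int i = 0; i < 3; i++)
                if ((valid_mask & (1u << i)) && rssi[i] > max_rssi)
                        max_rssi = rssi[i];

        /* Higher AGC (more radio gain) means a weaker received signal. */
        return (int)max_rssi - (int)agc_db - RSSI_OFFSET;
}

int main(void)
{
        unsigned char rssi[3] = { 97, 102, 95 };
        printf("signal = %d dBm\n", rssi_to_dbm(rssi, 0x7, 110)); /* -52 */
        return 0;
}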
- * - * I think this field should be removed in favor of a - * new 802.11n radiotap field "RX chains" that is defined - * as a bitmask. - */ - rx_status.antenna = - (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) - >> RX_RES_PHY_FLAGS_ANTENNA_POS; - - /* set the preamble flag if appropriate */ - if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; - - /* Set up the HT phy flags */ - if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; - if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; - if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; - - iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status, - rxb, &rx_status); -} - -/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). - * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ -void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - priv->_4965.last_phy_res_valid = true; - memcpy(&priv->_4965.last_phy_res, pkt->u.raw, - sizeof(struct iwl_rx_phy_res)); -} - -static int iwl4965_get_channels_for_scan(struct iwl_priv *priv, - struct ieee80211_vif *vif, - enum ieee80211_band band, - u8 is_active, u8 n_probes, - struct iwl_scan_channel *scan_ch) -{ - struct ieee80211_channel *chan; - const struct ieee80211_supported_band *sband; - const struct iwl_channel_info *ch_info; - u16 passive_dwell = 0; - u16 active_dwell = 0; - int added, i; - u16 channel; - - sband = iwl_get_hw_mode(priv, band); - if (!sband) - return 0; - - active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); - passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); - - if (passive_dwell <= active_dwell) - passive_dwell = active_dwell + 1; - - for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { - chan = priv->scan_request->channels[i]; - - if (chan->band != band) - continue; - - channel = chan->hw_value; - scan_ch->channel = cpu_to_le16(channel); - - ch_info = iwl_legacy_get_channel_info(priv, band, channel); - if (!iwl_legacy_is_channel_valid(ch_info)) { - IWL_DEBUG_SCAN(priv, - "Channel %d is INVALID for this band.\n", - channel); - continue; - } - - if (!is_active || iwl_legacy_is_channel_passive(ch_info) || - (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) - scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; - else - scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; - - if (n_probes) - scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); - - scan_ch->active_dwell = cpu_to_le16(active_dwell); - scan_ch->passive_dwell = cpu_to_le16(passive_dwell); - - /* Set txpower levels to defaults */ - scan_ch->dsp_atten = 110; - - /* NOTE: if we were doing 6Mb OFDM for scans we'd use - * power level: - * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; - */ - if (band == IEEE80211_BAND_5GHZ) - scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; - else - scan_ch->tx_gain = ((1 << 5) | (5 << 3)); - - IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", - channel, le32_to_cpu(scan_ch->type), - (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? - "ACTIVE" : "PASSIVE", - (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 
- active_dwell : passive_dwell); - - scan_ch++; - added++; - } - - IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); - return added; -} - -int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_SCAN_CMD, - .len = sizeof(struct iwl_scan_cmd), - .flags = CMD_SIZE_HUGE, - }; - struct iwl_scan_cmd *scan; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u32 rate_flags = 0; - u16 cmd_len; - u16 rx_chain = 0; - enum ieee80211_band band; - u8 n_probes = 0; - u8 rx_ant = priv->hw_params.valid_rx_ant; - u8 rate; - bool is_active = false; - int chan_mod; - u8 active_chains; - u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; - int ret; - - lockdep_assert_held(&priv->mutex); - - if (vif) - ctx = iwl_legacy_rxon_ctx_from_vif(vif); - - if (!priv->scan_cmd) { - priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + - IWL_MAX_SCAN_SIZE, GFP_KERNEL); - if (!priv->scan_cmd) { - IWL_DEBUG_SCAN(priv, - "fail to allocate memory for scan\n"); - return -ENOMEM; - } - } - scan = priv->scan_cmd; - memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); - - scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; - scan->quiet_time = IWL_ACTIVE_QUIET_TIME; - - if (iwl_legacy_is_any_associated(priv)) { - u16 interval; - u32 extra; - u32 suspend_time = 100; - u32 scan_suspend_time = 100; - - IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); - interval = vif->bss_conf.beacon_int; - - scan->suspend_time = 0; - scan->max_out_time = cpu_to_le32(200 * 1024); - if (!interval) - interval = suspend_time; - - extra = (suspend_time / interval) << 22; - scan_suspend_time = (extra | - ((suspend_time % interval) * 1024)); - scan->suspend_time = cpu_to_le32(scan_suspend_time); - IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", - scan_suspend_time, interval); - } - - if (priv->scan_request->n_ssids) { - int i, p = 0; - IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); - for (i = 0; i < priv->scan_request->n_ssids; i++) { - /* always does wildcard anyway */ - if (!priv->scan_request->ssids[i].ssid_len) - continue; - scan->direct_scan[p].id = WLAN_EID_SSID; - scan->direct_scan[p].len = - priv->scan_request->ssids[i].ssid_len; - memcpy(scan->direct_scan[p].ssid, - priv->scan_request->ssids[i].ssid, - priv->scan_request->ssids[i].ssid_len); - n_probes++; - p++; - } - is_active = true; - } else - IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); - - scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; - scan->tx_cmd.sta_id = ctx->bcast_sta_id; - scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - switch (priv->scan_band) { - case IEEE80211_BAND_2GHZ: - scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; - chan_mod = le32_to_cpu( - priv->contexts[IWL_RXON_CTX_BSS].active.flags & - RXON_FLG_CHANNEL_MODE_MSK) - >> RXON_FLG_CHANNEL_MODE_POS; - if (chan_mod == CHANNEL_MODE_PURE_40) { - rate = IWL_RATE_6M_PLCP; - } else { - rate = IWL_RATE_1M_PLCP; - rate_flags = RATE_MCS_CCK_MSK; - } - break; - case IEEE80211_BAND_5GHZ: - rate = IWL_RATE_6M_PLCP; - break; - default: - IWL_WARN(priv, "Invalid scan band\n"); - return -EIO; - } - - /* - * If active scanning is requested but a certain channel is - * marked passive, we can do active scanning if we detect - * transmissions. - * - * There is an issue with some firmware versions that triggers - * a sysassert on a "good CRC threshold" of zero (== disabled), - * on a radar channel even though this means that we should NOT - * send probes. 
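Earlier in this scan routine, the pause between scan bursts while associated is packed into one 32-bit field: the beacon-interval quotient in the upper bits (shifted by 22) and the remainder scaled by 1024 in the lower bits. A compact sketch of that packing (it mirrors the arithmetic in the deleted code; the function name is illustrative and this is not a definition of the firmware ABI):

#include <stdio.h>
#include <stdint.h>

/* suspend_time and beacon_int use the same time unit; the x1024 scales the
 * remainder exactly as the deleted code does. */
static uint32_t pack_suspend_time(uint32_t suspend_time, uint32_t beacon_int)
{
        uint32_t interval = beacon_int ? beacon_int : suspend_time;
        uint32_t quotient = suspend_time / interval;
        uint32_t remainder = suspend_time % interval;

        return (quotient << 22) | (remainder * 1024);
}

int main(void)
{
        /* suspend_time 100, beacon interval 100: quotient 1, remainder 0 */
        printf("0x%08x\n", pack_suspend_time(100, 100)); /* 0x00400000 */
        /* beacon interval 300: quotient 0, remainder 100 -> 102400 */
        printf("0x%08x\n", pack_suspend_time(100, 300)); /* 0x00019000 */
        return 0;
}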
- * - * The "good CRC threshold" is the number of frames that we - * need to receive during our dwell time on a channel before - * sending out probes -- setting this to a huge value will - * mean we never reach it, but at the same time work around - * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER - * here instead of IWL_GOOD_CRC_TH_DISABLED. - */ - scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : - IWL_GOOD_CRC_TH_NEVER; - - band = priv->scan_band; - - if (priv->cfg->scan_rx_antennas[band]) - rx_ant = priv->cfg->scan_rx_antennas[band]; - - priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv, - priv->scan_tx_ant[band], - scan_tx_antennas); - rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]); - scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags); - - /* In power save mode use one chain, otherwise use all chains */ - if (test_bit(STATUS_POWER_PMI, &priv->status)) { - /* rx_ant has been set to all valid chains previously */ - active_chains = rx_ant & - ((u8)(priv->chain_noise_data.active_chains)); - if (!active_chains) - active_chains = rx_ant; - - IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", - priv->chain_noise_data.active_chains); - - rx_ant = iwl4965_first_antenna(active_chains); - } - - /* MIMO is not used here, but value is required */ - rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; - rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; - rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; - rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; - scan->rx_chain = cpu_to_le16(rx_chain); - - cmd_len = iwl_legacy_fill_probe_req(priv, - (struct ieee80211_mgmt *)scan->data, - vif->addr, - priv->scan_request->ie, - priv->scan_request->ie_len, - IWL_MAX_SCAN_SIZE - sizeof(*scan)); - scan->tx_cmd.len = cpu_to_le16(cmd_len); - - scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | - RXON_FILTER_BCON_AWARE_MSK); - - scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band, - is_active, n_probes, - (void *)&scan->data[cmd_len]); - if (scan->channel_count == 0) { - IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); - return -EIO; - } - - cmd.len += le16_to_cpu(scan->tx_cmd.len) + - scan->channel_count * sizeof(struct iwl_scan_channel); - cmd.data = scan; - scan->len = cpu_to_le16(cmd.len); - - set_bit(STATUS_SCAN_HW, &priv->status); - - ret = iwl_legacy_send_cmd_sync(priv, &cmd); - if (ret) - clear_bit(STATUS_SCAN_HW, &priv->status); - - return ret; -} - -int iwl4965_manage_ibss_station(struct iwl_priv *priv, - struct ieee80211_vif *vif, bool add) -{ - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - if (add) - return iwl4965_add_bssid_station(priv, vif_priv->ctx, - vif->bss_conf.bssid, - &vif_priv->ibss_bssid_sta_id); - return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, - vif->bss_conf.bssid); -} - -void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, - int sta_id, int tid, int freed) -{ - lockdep_assert_held(&priv->sta_lock); - - if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; - else { - IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", - priv->stations[sta_id].tid[tid].tfds_in_queue, - freed); - priv->stations[sta_id].tid[tid].tfds_in_queue = 0; - } -} - -#define IWL_TX_QUEUE_MSK 0xfffff - -static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv) -{ - return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || - priv->current_ht_config.single_chain_sufficient; -} 
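The rx_chain field built in the scan command above is a small bitfield: which chains are physically valid, which to force for MIMO and non-MIMO reception, and a driver-force flag. A sketch of that composition follows; the bit positions are placeholders, since the real RXON_RX_CHAIN_*_POS values are defined in the driver's command headers and are not part of this hunk:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit positions -- illustrative only. */
#define CHAIN_DRIVER_FORCE_POS    0
#define CHAIN_VALID_POS           1
#define CHAIN_FORCE_SEL_POS       4
#define CHAIN_FORCE_MIMO_SEL_POS  7

static uint16_t build_rx_chain(uint8_t valid_ant, uint8_t rx_ant)
{
        uint16_t rx_chain = 0;

        rx_chain |= (uint16_t)valid_ant << CHAIN_VALID_POS;       /* fitted   */
        rx_chain |= (uint16_t)rx_ant << CHAIN_FORCE_MIMO_SEL_POS; /* MIMO set */
        rx_chain |= (uint16_t)rx_ant << CHAIN_FORCE_SEL_POS;      /* non-MIMO */
        rx_chain |= 1u << CHAIN_DRIVER_FORCE_POS;   /* driver overrides uCode */
        return rx_chain;
}

int main(void)
{
        /* antennas A+B connected (0x3), receive on A only (0x1) */
        printf("rx_chain = 0x%04x\n", build_rx_chain(0x3, 0x1));
        return 0;
}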
- -#define IWL_NUM_RX_CHAINS_MULTIPLE 3 -#define IWL_NUM_RX_CHAINS_SINGLE 2 -#define IWL_NUM_IDLE_CHAINS_DUAL 2 -#define IWL_NUM_IDLE_CHAINS_SINGLE 1 - -/* - * Determine how many receiver/antenna chains to use. - * - * More provides better reception via diversity. Fewer saves power - * at the expense of throughput, but only when not in powersave to - * start with. - * - * MIMO (dual stream) requires at least 2, but works better with 3. - * This does not determine *which* chains to use, just how many. - */ -static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv) -{ - /* # of Rx chains to use when expecting MIMO. */ - if (iwl4965_is_single_rx_stream(priv)) - return IWL_NUM_RX_CHAINS_SINGLE; - else - return IWL_NUM_RX_CHAINS_MULTIPLE; -} - -/* - * When we are in power saving mode, unless device support spatial - * multiplexing power save, use the active count for rx chain count. - */ -static int -iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) -{ - /* # Rx chains when idling, depending on SMPS mode */ - switch (priv->current_ht_config.smps) { - case IEEE80211_SMPS_STATIC: - case IEEE80211_SMPS_DYNAMIC: - return IWL_NUM_IDLE_CHAINS_SINGLE; - case IEEE80211_SMPS_OFF: - return active_cnt; - default: - WARN(1, "invalid SMPS mode %d", - priv->current_ht_config.smps); - return active_cnt; - } -} - -/* up to 4 chains */ -static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap) -{ - u8 res; - res = (chain_bitmap & BIT(0)) >> 0; - res += (chain_bitmap & BIT(1)) >> 1; - res += (chain_bitmap & BIT(2)) >> 2; - res += (chain_bitmap & BIT(3)) >> 3; - return res; -} - -/** - * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image - * - * Selects how many and which Rx receivers/antennas/chains to use. - * This should not be used for scan command ... it puts data in wrong place. - */ -void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - bool is_single = iwl4965_is_single_rx_stream(priv); - bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); - u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; - u32 active_chains; - u16 rx_chain; - - /* Tell uCode which antennas are actually connected. - * Before first association, we assume all antennas are connected. - * Just after first association, iwl4965_chain_noise_calibration() - * checks which antennas actually *are* connected. */ - if (priv->chain_noise_data.active_chains) - active_chains = priv->chain_noise_data.active_chains; - else - active_chains = priv->hw_params.valid_rx_ant; - - rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; - - /* How many receivers should we use? 
*/ - active_rx_cnt = iwl4965_get_active_rx_chain_count(priv); - idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt); - - - /* correct rx chain count according hw settings - * and chain noise calibration - */ - valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains); - if (valid_rx_cnt < active_rx_cnt) - active_rx_cnt = valid_rx_cnt; - - if (valid_rx_cnt < idle_rx_cnt) - idle_rx_cnt = valid_rx_cnt; - - rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; - rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; - - ctx->staging.rx_chain = cpu_to_le16(rx_chain); - - if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) - ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; - else - ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; - - IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", - ctx->staging.rx_chain, - active_rx_cnt, idle_rx_cnt); - - WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || - active_rx_cnt < idle_rx_cnt); -} - -u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) -{ - int i; - u8 ind = ant; - - for (i = 0; i < RATE_ANT_NUM - 1; i++) { - ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0; - if (valid & BIT(ind)) - return ind; - } - return ant; -} - -static const char *iwl4965_get_fh_string(int cmd) -{ - switch (cmd) { - IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); - IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); - IWL_CMD(FH_RSCSR_CHNL0_WPTR); - IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); - IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); - IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); - IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); - IWL_CMD(FH_TSSR_TX_STATUS_REG); - IWL_CMD(FH_TSSR_TX_ERROR_REG); - default: - return "UNKNOWN"; - } -} - -int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display) -{ - int i; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - int pos = 0; - size_t bufsz = 0; -#endif - static const u32 fh_tbl[] = { - FH_RSCSR_CHNL0_STTS_WPTR_REG, - FH_RSCSR_CHNL0_RBDCB_BASE_REG, - FH_RSCSR_CHNL0_WPTR, - FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_MEM_RSSR_SHARED_CTRL_REG, - FH_MEM_RSSR_RX_STATUS_REG, - FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, - FH_TSSR_TX_STATUS_REG, - FH_TSSR_TX_ERROR_REG - }; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (display) { - bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; - *buf = kmalloc(bufsz, GFP_KERNEL); - if (!*buf) - return -ENOMEM; - pos += scnprintf(*buf + pos, bufsz - pos, - "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { - pos += scnprintf(*buf + pos, bufsz - pos, - " %34s: 0X%08x\n", - iwl4965_get_fh_string(fh_tbl[i]), - iwl_legacy_read_direct32(priv, fh_tbl[i])); - } - return pos; - } -#endif - IWL_ERR(priv, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { - IWL_ERR(priv, " %34s: 0X%08x\n", - iwl4965_get_fh_string(fh_tbl[i]), - iwl_legacy_read_direct32(priv, fh_tbl[i])); - } - return 0; -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c deleted file mode 100644 index 57ebe214e68..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c +++ /dev/null @@ -1,2871 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <net/mac80211.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/delay.h> - -#include <linux/workqueue.h> - -#include "iwl-dev.h" -#include "iwl-sta.h" -#include "iwl-core.h" -#include "iwl-4965.h" - -#define IWL4965_RS_NAME "iwl-4965-rs" - -#define NUM_TRY_BEFORE_ANT_TOGGLE 1 -#define IWL_NUMBER_TRY 1 -#define IWL_HT_NUMBER_TRY 3 - -#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ -#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ -#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ - -/* max allowed rate miss before sync LQ cmd */ -#define IWL_MISSED_RATE_MAX 15 -/* max time to accum history 2 seconds */ -#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) - -static u8 rs_ht_to_legacy[] = { - IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX -}; - -static const u8 ant_toggle_lookup[] = { - /*ANT_NONE -> */ ANT_NONE, - /*ANT_A -> */ ANT_B, - /*ANT_B -> */ ANT_C, - /*ANT_AB -> */ ANT_BC, - /*ANT_C -> */ ANT_A, - /*ANT_AC -> */ ANT_AB, - /*ANT_BC -> */ ANT_AC, - /*ANT_ABC -> */ ANT_ABC, -}; - -#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ - [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ - IWL_RATE_SISO_##s##M_PLCP, \ - IWL_RATE_MIMO2_##s##M_PLCP,\ - IWL_RATE_##r##M_IEEE, \ - IWL_RATE_##ip##M_INDEX, \ - IWL_RATE_##in##M_INDEX, \ - IWL_RATE_##rp##M_INDEX, \ - IWL_RATE_##rn##M_INDEX, \ - IWL_RATE_##pp##M_INDEX, \ - IWL_RATE_##np##M_INDEX } - -/* - * Parameter order: - * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate - * - * If there isn't a valid next or previous rate then INV is used which - * maps to IWL_RATE_INVALID - * - */ -const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = { - IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ - IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ - IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ - IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ - IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ - IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ - IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ - IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ - IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ - IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ - 
IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ - IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ - IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ -}; - -static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) -{ - int idx = 0; - - /* HT rate format */ - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = (rate_n_flags & 0xff); - - if (idx >= IWL_RATE_MIMO2_6M_PLCP) - idx = idx - IWL_RATE_MIMO2_6M_PLCP; - - idx += IWL_FIRST_OFDM_RATE; - /* skip 9M not supported in ht*/ - if (idx >= IWL_RATE_9M_INDEX) - idx += 1; - if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) - return idx; - - /* legacy rate format, search for match in table */ - } else { - for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++) - if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF)) - return idx; - } - - return -1; -} - -static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, - struct sk_buff *skb, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta); -static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, u32 rate_n_flags); -static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, - bool force_search); - -#ifdef CONFIG_MAC80211_DEBUGFS -static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index); -#else -static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index) -{} -#endif - -/** - * The following tables contain the expected throughput metrics for all rates - * - * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits - * - * where invalid entries are zeros. - * - * CCK rates are only valid in legacy table and will only be used in G - * (2.4 GHz) band. - */ - -static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { - 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 -}; - -static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ - {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ - {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ - {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ -}; - -static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ - {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ - {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ - {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ -}; - -static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ - {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ - {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ - {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ -}; - -static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ - {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ - {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ - {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ -}; - -/* mbps, mcs */ -static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { - { "1", "BPSK DSSS"}, - { "2", "QPSK DSSS"}, - {"5.5", "BPSK CCK"}, - { "11", "QPSK CCK"}, - { "6", "BPSK 1/2"}, - { "9", "BPSK 1/2"}, - { "12", "QPSK 1/2"}, - { "18", "QPSK 3/4"}, - { "24", "16QAM 1/2"}, - { "36", 
"16QAM 3/4"}, - { "48", "64QAM 2/3"}, - { "54", "64QAM 3/4"}, - { "60", "64QAM 5/6"}, -}; - -#define MCS_INDEX_PER_STREAM (8) - -static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags) -{ - return (u8)(rate_n_flags & 0xFF); -} - -static void -iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) -{ - window->data = 0; - window->success_counter = 0; - window->success_ratio = IWL_INVALID_VALUE; - window->counter = 0; - window->average_tpt = IWL_INVALID_VALUE; - window->stamp = 0; -} - -static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type) -{ - return (ant_type & valid_antenna) == ant_type; -} - -/* - * removes the old data from the statistics. All data that is older than - * TID_MAX_TIME_DIFF, will be deleted. - */ -static void -iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) -{ - /* The oldest age we want to keep */ - u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; - - while (tl->queue_count && - (tl->time_stamp < oldest_time)) { - tl->total -= tl->packet_count[tl->head]; - tl->packet_count[tl->head] = 0; - tl->time_stamp += TID_QUEUE_CELL_SPACING; - tl->queue_count--; - tl->head++; - if (tl->head >= TID_QUEUE_MAX_SIZE) - tl->head = 0; - } -} - -/* - * increment traffic load value for tid and also remove - * any old values if passed the certain time period - */ -static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data, - struct ieee80211_hdr *hdr) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 index; - struct iwl_traffic_load *tl = NULL; - u8 tid; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } else - return MAX_TID_COUNT; - - if (unlikely(tid >= TID_MAX_LOAD_COUNT)) - return MAX_TID_COUNT; - - tl = &lq_data->load[tid]; - - curr_time -= curr_time % TID_ROUND_VALUE; - - /* Happens only for the first packet. 
Initialize the data */ - if (!(tl->queue_count)) { - tl->total = 1; - tl->time_stamp = curr_time; - tl->queue_count = 1; - tl->head = 0; - tl->packet_count[0] = 1; - return MAX_TID_COUNT; - } - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - index = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (index >= TID_QUEUE_MAX_SIZE) - iwl4965_rs_tl_rm_old_stats(tl, curr_time); - - index = (tl->head + index) % TID_QUEUE_MAX_SIZE; - tl->packet_count[index] = tl->packet_count[index] + 1; - tl->total = tl->total + 1; - - if ((index + 1) > tl->queue_count) - tl->queue_count = index + 1; - - return tid; -} - -/* - get the traffic load value for tid -*/ -static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 index; - struct iwl_traffic_load *tl = NULL; - - if (tid >= TID_MAX_LOAD_COUNT) - return 0; - - tl = &(lq_data->load[tid]); - - curr_time -= curr_time % TID_ROUND_VALUE; - - if (!(tl->queue_count)) - return 0; - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - index = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (index >= TID_QUEUE_MAX_SIZE) - iwl4965_rs_tl_rm_old_stats(tl, curr_time); - - return tl->total; -} - -static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, - struct iwl_lq_sta *lq_data, u8 tid, - struct ieee80211_sta *sta) -{ - int ret = -EAGAIN; - u32 load; - - load = iwl4965_rs_tl_get_load(lq_data, tid); - - if (load > IWL_AGG_LOAD_THRESHOLD) { - IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", - sta->addr, tid); - ret = ieee80211_start_tx_ba_session(sta, tid, 5000); - if (ret == -EAGAIN) { - /* - * driver and mac80211 is out of sync - * this might be cause by reloading firmware - * stop the tx ba session here - */ - IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", - tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - } else { - IWL_ERR(priv, "Aggregation not enabled for tid %d " - "because load = %u\n", tid, load); - } - return ret; -} - -static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, - struct iwl_lq_sta *lq_data, - struct ieee80211_sta *sta) -{ - if (tid < TID_MAX_LOAD_COUNT) - iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); - else - IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", - tid, TID_MAX_LOAD_COUNT); -} - -static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags) -{ - return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_C_MSK); -} - -/* - * Static function to get the expected throughput from an iwl_scale_tbl_info - * that wraps a NULL pointer check - */ -static s32 -iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) -{ - if (tbl->expected_tpt) - return tbl->expected_tpt[rs_index]; - return 0; -} - -/** - * iwl4965_rs_collect_tx_data - Update the success/failure sliding window - * - * We keep a sliding window of the last 62 packets transmitted - * at this rate. window->data contains the bitmask of successful - * packets. 
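The sliding-window bookkeeping described above keeps a 62-bit history bitmap per rate, drops the oldest attempt when the window is full, and derives a fixed-point success ratio (scaled by 128) from the counters. A self-contained sketch of that update, using the window size from the deleted code and illustrative structure names:

#include <stdio.h>
#include <stdint.h>

#define RATE_MAX_WINDOW 62           /* tx attempts kept in history */

struct tx_window {
        uint64_t data;               /* bitmap: bit set == success */
        int counter;                 /* attempts recorded */
        int success_counter;         /* successes recorded */
};

static void window_add(struct tx_window *w, int success)
{
        const uint64_t oldest = 1ULL << (RATE_MAX_WINDOW - 1);

        if (w->counter >= RATE_MAX_WINDOW) {
                /* Drop the oldest attempt before recording the new one. */
                w->counter = RATE_MAX_WINDOW - 1;
                if (w->data & oldest) {
                        w->data &= ~oldest;
                        w->success_counter--;
                }
        }
        w->counter++;
        w->data <<= 1;
        if (success) {
                w->data |= 1;
                w->success_counter++;
        }
}

/* Success ratio scaled by 128, i.e. 12800 corresponds to 100%. */
static int window_success_ratio(const struct tx_window *w)
{
        if (!w->counter)
                return -1;
        return 128 * (100 * w->success_counter) / w->counter;
}

int main(void)
{
        struct tx_window w = { 0, 0, 0 };
        for (int i = 0; i < 10; i++)
                window_add(&w, i % 4 != 0);      /* 3 of every 4 succeed */
        printf("ratio = %d (12800 == 100%%)\n", window_success_ratio(&w));
        return 0;
}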
- */ -static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes) -{ - struct iwl_rate_scale_data *window = NULL; - static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); - s32 fail_count, tpt; - - if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) - return -EINVAL; - - /* Select window for current tx bit rate */ - window = &(tbl->win[scale_index]); - - /* Get expected throughput */ - tpt = iwl4965_get_expected_tpt(tbl, scale_index); - - /* - * Keep track of only the latest 62 tx frame attempts in this rate's - * history window; anything older isn't really relevant any more. - * If we have filled up the sliding window, drop the oldest attempt; - * if the oldest attempt (highest bit in bitmap) shows "success", - * subtract "1" from the success counter (this is the main reason - * we keep these bitmaps!). - */ - while (attempts > 0) { - if (window->counter >= IWL_RATE_MAX_WINDOW) { - - /* remove earliest */ - window->counter = IWL_RATE_MAX_WINDOW - 1; - - if (window->data & mask) { - window->data &= ~mask; - window->success_counter--; - } - } - - /* Increment frames-attempted counter */ - window->counter++; - - /* Shift bitmap by one frame to throw away oldest history */ - window->data <<= 1; - - /* Mark the most recent #successes attempts as successful */ - if (successes > 0) { - window->success_counter++; - window->data |= 0x1; - successes--; - } - - attempts--; - } - - /* Calculate current success ratio, avoid divide-by-0! */ - if (window->counter > 0) - window->success_ratio = 128 * (100 * window->success_counter) - / window->counter; - else - window->success_ratio = IWL_INVALID_VALUE; - - fail_count = window->counter - window->success_counter; - - /* Calculate average throughput, if we have enough history. */ - if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || - (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) - window->average_tpt = (window->success_ratio * tpt + 64) / 128; - else - window->average_tpt = IWL_INVALID_VALUE; - - /* Tag this window as having been updated */ - window->stamp = jiffies; - - return 0; -} - -/* - * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
- */ -static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv, - struct iwl_scale_tbl_info *tbl, - int index, u8 use_green) -{ - u32 rate_n_flags = 0; - - if (is_legacy(tbl->lq_type)) { - rate_n_flags = iwlegacy_rates[index].plcp; - if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) - rate_n_flags |= RATE_MCS_CCK_MSK; - - } else if (is_Ht(tbl->lq_type)) { - if (index > IWL_LAST_OFDM_RATE) { - IWL_ERR(priv, "Invalid HT rate index %d\n", index); - index = IWL_LAST_OFDM_RATE; - } - rate_n_flags = RATE_MCS_HT_MSK; - - if (is_siso(tbl->lq_type)) - rate_n_flags |= iwlegacy_rates[index].plcp_siso; - else - rate_n_flags |= iwlegacy_rates[index].plcp_mimo2; - } else { - IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); - } - - rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & - RATE_MCS_ANT_ABC_MSK); - - if (is_Ht(tbl->lq_type)) { - if (tbl->is_ht40) { - if (tbl->is_dup) - rate_n_flags |= RATE_MCS_DUP_MSK; - else - rate_n_flags |= RATE_MCS_HT40_MSK; - } - if (tbl->is_SGI) - rate_n_flags |= RATE_MCS_SGI_MSK; - - if (use_green) { - rate_n_flags |= RATE_MCS_GF_MSK; - if (is_siso(tbl->lq_type) && tbl->is_SGI) { - rate_n_flags &= ~RATE_MCS_SGI_MSK; - IWL_ERR(priv, "GF was set with SGI:SISO\n"); - } - } - } - return rate_n_flags; -} - -/* - * Interpret uCode API's rate_n_flags format, - * fill "search" or "active" tx mode table. - */ -static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, - enum ieee80211_band band, - struct iwl_scale_tbl_info *tbl, - int *rate_idx) -{ - u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); - u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags); - u8 mcs; - - memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); - *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags); - - if (*rate_idx == IWL_RATE_INVALID) { - *rate_idx = -1; - return -EINVAL; - } - tbl->is_SGI = 0; /* default legacy setup */ - tbl->is_ht40 = 0; - tbl->is_dup = 0; - tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); - tbl->lq_type = LQ_NONE; - tbl->max_search = IWL_MAX_SEARCH; - - /* legacy rate format */ - if (!(rate_n_flags & RATE_MCS_HT_MSK)) { - if (iwl4965_num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) - tbl->lq_type = LQ_A; - else - tbl->lq_type = LQ_G; - } - /* HT rate format */ - } else { - if (rate_n_flags & RATE_MCS_SGI_MSK) - tbl->is_SGI = 1; - - if ((rate_n_flags & RATE_MCS_HT40_MSK) || - (rate_n_flags & RATE_MCS_DUP_MSK)) - tbl->is_ht40 = 1; - - if (rate_n_flags & RATE_MCS_DUP_MSK) - tbl->is_dup = 1; - - mcs = iwl4965_rs_extract_rate(rate_n_flags); - - /* SISO */ - if (mcs <= IWL_RATE_SISO_60M_PLCP) { - if (iwl4965_num_of_ant == 1) - tbl->lq_type = LQ_SISO; /*else NONE*/ - /* MIMO2 */ - } else { - if (iwl4965_num_of_ant == 2) - tbl->lq_type = LQ_MIMO2; - } - } - return 0; -} - -/* switch to another antenna/antennas and return 1 */ -/* if no other valid antenna found, return 0 */ -static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, - struct iwl_scale_tbl_info *tbl) -{ - u8 new_ant_type; - - if (!tbl->ant_type || tbl->ant_type > ANT_ABC) - return 0; - - if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type)) - return 0; - - new_ant_type = ant_toggle_lookup[tbl->ant_type]; - - while ((new_ant_type != tbl->ant_type) && - !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type)) - new_ant_type = ant_toggle_lookup[new_ant_type]; - - if (new_ant_type == tbl->ant_type) - return 0; - - tbl->ant_type = new_ant_type; - *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; - *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; - return 1; 
-} - -/** - * Green-field mode is valid if the station supports it and - * there are no non-GF stations present in the BSS. - */ -static bool iwl4965_rs_use_green(struct ieee80211_sta *sta) -{ - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && - !(ctx->ht.non_gf_sta_present); -} - -/** - * iwl4965_rs_get_supported_rates - get the available rates - * - * if management frame or broadcast frame only return - * basic available rates. - * - */ -static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta, - struct ieee80211_hdr *hdr, - enum iwl_table_type rate_type) -{ - if (is_legacy(rate_type)) { - return lq_sta->active_legacy_rate; - } else { - if (is_siso(rate_type)) - return lq_sta->active_siso_rate; - else - return lq_sta->active_mimo2_rate; - } -} - -static u16 -iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, - int rate_type) -{ - u8 high = IWL_RATE_INVALID; - u8 low = IWL_RATE_INVALID; - - /* 802.11A or ht walks to the next literal adjacent rate in - * the rate table */ - if (is_a_band(rate_type) || !is_legacy(rate_type)) { - int i; - u32 mask; - - /* Find the previous rate that is in the rate mask */ - i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { - if (rate_mask & mask) { - low = i; - break; - } - } - - /* Find the next rate that is in the rate mask */ - i = index + 1; - for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { - if (rate_mask & mask) { - high = i; - break; - } - } - - return (high << 8) | low; - } - - low = index; - while (low != IWL_RATE_INVALID) { - low = iwlegacy_rates[low].prev_rs; - if (low == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << low)) - break; - IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); - } - - high = index; - while (high != IWL_RATE_INVALID) { - high = iwlegacy_rates[high].next_rs; - if (high == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << high)) - break; - IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); - } - - return (high << 8) | low; -} - -static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - u8 scale_index, u8 ht_possible) -{ - s32 low; - u16 rate_mask; - u16 high_low; - u8 switch_to_legacy = 0; - u8 is_green = lq_sta->is_green; - struct iwl_priv *priv = lq_sta->drv; - - /* check if we need to switch from HT to legacy rates. 
- * assumption is that mandatory rates (1Mbps or 6Mbps) - * are always supported (spec demand) */ - if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { - switch_to_legacy = 1; - scale_index = rs_ht_to_legacy[scale_index]; - if (lq_sta->band == IEEE80211_BAND_5GHZ) - tbl->lq_type = LQ_A; - else - tbl->lq_type = LQ_G; - - if (iwl4965_num_of_ant(tbl->ant_type) > 1) - tbl->ant_type = - iwl4965_first_antenna(priv->hw_params.valid_tx_ant); - - tbl->is_ht40 = 0; - tbl->is_SGI = 0; - tbl->max_search = IWL_MAX_SEARCH; - } - - rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); - - /* Mask with station rate restriction */ - if (is_legacy(tbl->lq_type)) { - /* supp_rates has no CCK bits in A mode */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) - rate_mask = (u16)(rate_mask & - (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); - else - rate_mask = (u16)(rate_mask & lq_sta->supp_rates); - } - - /* If we switched from HT to legacy, check current rate */ - if (switch_to_legacy && (rate_mask & (1 << scale_index))) { - low = scale_index; - goto out; - } - - high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv, - scale_index, rate_mask, - tbl->lq_type); - low = high_low & 0xff; - - if (low == IWL_RATE_INVALID) - low = scale_index; - -out: - return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); -} - -/* - * Simple function to compare two rate scale table types - */ -static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a, - struct iwl_scale_tbl_info *b) -{ - return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && - (a->is_SGI == b->is_SGI); -} - -/* - * mac80211 sends us Tx status - */ -static void -iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) -{ - int legacy_success; - int retries; - int rs_index, mac_index, i; - struct iwl_lq_sta *lq_sta = priv_sta; - struct iwl_link_quality_cmd *table; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_priv *priv = (struct iwl_priv *)priv_r; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - enum mac80211_rate_control_flags mac_flags; - u32 tx_rate; - struct iwl_scale_tbl_info tbl_type; - struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - - IWL_DEBUG_RATE_LIMIT(priv, - "get frame ack response, update rate scale window\n"); - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (!lq_sta) { - IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n"); - return; - } else if (!lq_sta->drv) { - IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); - return; - } - - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - - /* This packet was aggregated but doesn't carry status info */ - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && - !(info->flags & IEEE80211_TX_STAT_AMPDU)) - return; - - /* - * Ignore this Tx frame response if its initial rate doesn't match - * that of latest Link Quality command. There may be stragglers - * from a previous Link Quality command, but we're no longer interested - * in those; they're either from the "active" mode while we're trying - * to check "search" mode, or a prior "search" mode after we've moved - * to a new "search" mode (which might become the new "active" mode). 
- */ - table = &lq_sta->lq; - tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); - iwl4965_rs_get_tbl_info_from_mcs(tx_rate, - priv->band, &tbl_type, &rs_index); - if (priv->band == IEEE80211_BAND_5GHZ) - rs_index -= IWL_FIRST_OFDM_RATE; - mac_flags = info->status.rates[0].flags; - mac_index = info->status.rates[0].idx; - /* For HT packets, map MCS to PLCP */ - if (mac_flags & IEEE80211_TX_RC_MCS) { - mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ - if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) - mac_index++; - /* - * mac80211 HT index is always zero-indexed; we need to move - * HT OFDM rates after CCK rates in 2.4 GHz band - */ - if (priv->band == IEEE80211_BAND_2GHZ) - mac_index += IWL_FIRST_OFDM_RATE; - } - /* Here we actually compare this rate to the latest LQ command */ - if ((mac_index < 0) || - (tbl_type.is_SGI != - !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || - (tbl_type.is_ht40 != - !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || - (tbl_type.is_dup != - !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || - (tbl_type.ant_type != info->antenna_sel_tx) || - (!!(tx_rate & RATE_MCS_HT_MSK) != - !!(mac_flags & IEEE80211_TX_RC_MCS)) || - (!!(tx_rate & RATE_MCS_GF_MSK) != - !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || - (rs_index != mac_index)) { - IWL_DEBUG_RATE(priv, - "initial rate %d does not match %d (0x%x)\n", - mac_index, rs_index, tx_rate); - /* - * Since rates mis-match, the last LQ command may have failed. - * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with - * ... driver. - */ - lq_sta->missed_rate_counter++; - if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { - lq_sta->missed_rate_counter = 0; - iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, - CMD_ASYNC, false); - } - /* Regardless, ignore this status info for outdated rate */ - return; - } else - /* Rate did match, so reset the missed_rate_counter */ - lq_sta->missed_rate_counter = 0; - - /* Figure out if rate scale algorithm is in active or search table */ - if (iwl4965_table_type_matches(&tbl_type, - &(lq_sta->lq_info[lq_sta->active_tbl]))) { - curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - } else if (iwl4965_table_type_matches(&tbl_type, - &lq_sta->lq_info[1 - lq_sta->active_tbl])) { - curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - } else { - IWL_DEBUG_RATE(priv, - "Neither active nor search matches tx rate\n"); - tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n", - tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); - tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n", - tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); - IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n", - tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI); - /* - * no matching table found, let's by-pass the data collection - * and continue to perform rate scale to find the rate table - */ - iwl4965_rs_stay_in_table(lq_sta, true); - goto done; - } - - /* - * Updating the frame history depends on whether packets were - * aggregated. - * - * For aggregation, all packets were transmitted at the same rate, the - * first index into rate scale table. 
- */ - if (info->flags & IEEE80211_TX_STAT_AMPDU) { - tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); - iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, - &rs_index); - iwl4965_rs_collect_tx_data(curr_tbl, rs_index, - info->status.ampdu_len, - info->status.ampdu_ack_len); - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->stay_in_tbl) { - lq_sta->total_success += info->status.ampdu_ack_len; - lq_sta->total_failed += (info->status.ampdu_len - - info->status.ampdu_ack_len); - } - } else { - /* - * For legacy, update frame history with for each Tx retry. - */ - retries = info->status.rates[0].count - 1; - /* HW doesn't send more than 15 retries */ - retries = min(retries, 15); - - /* The last transmission may have been successful */ - legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); - /* Collect data for each rate used during failed TX attempts */ - for (i = 0; i <= retries; ++i) { - tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); - iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, - &tbl_type, &rs_index); - /* - * Only collect stats if retried rate is in the same RS - * table as active/search. - */ - if (iwl4965_table_type_matches(&tbl_type, curr_tbl)) - tmp_tbl = curr_tbl; - else if (iwl4965_table_type_matches(&tbl_type, - other_tbl)) - tmp_tbl = other_tbl; - else - continue; - iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1, - i < retries ? 0 : legacy_success); - } - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->stay_in_tbl) { - lq_sta->total_success += legacy_success; - lq_sta->total_failed += retries + (1 - legacy_success); - } - } - /* The last TX rate is cached in lq_sta; it's set in if/else above */ - lq_sta->last_rate_n_flags = tx_rate; -done: - /* See if there's a better rate or modulation mode to try. */ - if (sta && sta->supp_rates[sband->band]) - iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta); -} - -/* - * Begin a period of staying with a selected modulation mode. - * Set "stay_in_tbl" flag to prevent any mode switches. - * Set frame tx success limits according to legacy vs. high-throughput, - * and reset overall (spanning all rates) tx success history statistics. - * These control how long we stay using same modulation mode before - * searching for a new mode. 
- */ -static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, - struct iwl_lq_sta *lq_sta) -{ - IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); - lq_sta->stay_in_tbl = 1; /* only place this gets set */ - if (is_legacy) { - lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; - lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; - } else { - lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; - lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; - } - lq_sta->table_count = 0; - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = jiffies; - lq_sta->action_counter = 0; -} - -/* - * Find correct throughput table for given mode of modulation - */ -static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl) -{ - /* Used to choose among HT tables */ - s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; - - /* Check for invalid LQ type */ - if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { - tbl->expected_tpt = expected_tpt_legacy; - return; - } - - /* Legacy rates have only one table */ - if (is_legacy(tbl->lq_type)) { - tbl->expected_tpt = expected_tpt_legacy; - return; - } - - /* Choose among many HT tables depending on number of streams - * (SISO/MIMO2), channel width (20/40), SGI, and aggregation - * status */ - if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_siso20MHz; - else if (is_siso(tbl->lq_type)) - ht_tbl_pointer = expected_tpt_siso40MHz; - else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_mimo2_20MHz; - else /* if (is_mimo2(tbl->lq_type)) <-- must be true */ - ht_tbl_pointer = expected_tpt_mimo2_40MHz; - - if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ - tbl->expected_tpt = ht_tbl_pointer[0]; - else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ - tbl->expected_tpt = ht_tbl_pointer[1]; - else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ - tbl->expected_tpt = ht_tbl_pointer[2]; - else /* AGG+SGI */ - tbl->expected_tpt = ht_tbl_pointer[3]; -} - -/* - * Find starting rate for new "search" high-throughput mode of modulation. - * Goal is to find lowest expected rate (under perfect conditions) that is - * above the current measured throughput of "active" mode, to give new mode - * a fair chance to prove itself without too many challenges. - * - * This gets called when transitioning to more aggressive modulation - * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive - * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need - * to decrease to match "active" throughput. When moving from MIMO to SISO, - * bit rate will typically need to increase, but not if performance was bad. 
- */ -static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, /* "search" */ - u16 rate_mask, s8 index) -{ - /* "active" values */ - struct iwl_scale_tbl_info *active_tbl = - &(lq_sta->lq_info[lq_sta->active_tbl]); - s32 active_sr = active_tbl->win[index].success_ratio; - s32 active_tpt = active_tbl->expected_tpt[index]; - - /* expected "search" throughput */ - s32 *tpt_tbl = tbl->expected_tpt; - - s32 new_rate, high, low, start_hi; - u16 high_low; - s8 rate = index; - - new_rate = high = low = start_hi = IWL_RATE_INVALID; - - for (; ;) { - high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask, - tbl->lq_type); - - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* - * Lower the "search" bit rate, to give new "search" mode - * approximately the same throughput as "active" if: - * - * 1) "Active" mode has been working modestly well (but not - * great), and expected "search" throughput (under perfect - * conditions) at candidate rate is above the actual - * measured "active" throughput (but less than expected - * "active" throughput under perfect conditions). - * OR - * 2) "Active" mode has been working perfectly or very well - * and expected "search" throughput (under perfect - * conditions) at candidate rate is above expected - * "active" throughput (under perfect conditions). - */ - if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && - ((active_sr > IWL_RATE_DECREASE_TH) && - (active_sr <= IWL_RATE_HIGH_TH) && - (tpt_tbl[rate] <= active_tpt))) || - ((active_sr >= IWL_RATE_SCALE_SWITCH) && - (tpt_tbl[rate] > active_tpt))) { - - /* (2nd or later pass) - * If we've already tried to raise the rate, and are - * now trying to lower it, use the higher rate. */ - if (start_hi != IWL_RATE_INVALID) { - new_rate = start_hi; - break; - } - - new_rate = rate; - - /* Loop again with lower rate */ - if (low != IWL_RATE_INVALID) - rate = low; - - /* Lower rate not available, use the original */ - else - break; - - /* Else try to raise the "search" rate to match "active" */ - } else { - /* (2nd or later pass) - * If we've already tried to lower the rate, and are - * now trying to raise it, use the lower rate. 
*/ - if (new_rate != IWL_RATE_INVALID) - break; - - /* Loop again with higher rate */ - else if (high != IWL_RATE_INVALID) { - start_hi = high; - rate = high; - - /* Higher rate not available, use the original */ - } else { - new_rate = rate; - break; - } - } - } - - return new_rate; -} - -/* - * Set up search table for MIMO2 - */ -static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, int index) -{ - u16 rate_mask; - s32 rate; - s8 is_green = lq_sta->is_green; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) - == WLAN_HT_CAP_SM_PS_STATIC) - return -1; - - /* Need both Tx chains/antennas to support MIMO */ - if (priv->hw_params.tx_chains_num < 2) - return -1; - - IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); - - tbl->lq_type = LQ_MIMO2; - tbl->is_dup = lq_sta->is_dup; - tbl->action = 0; - tbl->max_search = IWL_MAX_SEARCH; - rate_mask = lq_sta->active_mimo2_rate; - - if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); - - rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); - - IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", - rate, rate_mask); - if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { - IWL_DEBUG_RATE(priv, - "Can't switch with index %d rate mask %x\n", - rate, rate_mask); - return -1; - } - tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, - tbl, rate, is_green); - - IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", - tbl->current_rate, is_green); - return 0; -} - -/* - * Set up search table for SISO - */ -static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, int index) -{ - u16 rate_mask; - u8 is_green = lq_sta->is_green; - s32 rate; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); - - tbl->is_dup = lq_sta->is_dup; - tbl->lq_type = LQ_SISO; - tbl->action = 0; - tbl->max_search = IWL_MAX_SEARCH; - rate_mask = lq_sta->active_siso_rate; - - if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - if (is_green) - tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ - - iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); - rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); - - IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); - if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { - IWL_DEBUG_RATE(priv, - "can not switch with index %d rate mask %x\n", - rate, rate_mask); - return -1; - } - tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, - tbl, rate, is_green); - IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", - tbl->current_rate, is_green); - return 0; -} - -/* - * Try to switch to new modulation mode from legacy - */ -static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - 
struct ieee80211_sta *sta, - int index) -{ - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = priv->hw_params.valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - int ret = 0; - u8 update_search_tbl_counter = 0; - - tbl->action = IWL_LEGACY_SWITCH_SISO; - - start_action = tbl->action; - for (; ;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_LEGACY_SWITCH_ANTENNA1: - case IWL_LEGACY_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); - - if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && - tx_chains_num <= 1) || - (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && - tx_chains_num <= 2)) - break; - - /* Don't change antenna if success has been great */ - if (window->success_ratio >= IWL_RS_GOOD_RATIO) - break; - - /* Set up search table to try other antenna */ - memcpy(search_tbl, tbl, sz); - - if (iwl4965_rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) { - update_search_tbl_counter = 1; - iwl4965_rs_set_expected_tpt_table(lq_sta, - search_tbl); - goto out; - } - break; - case IWL_LEGACY_SWITCH_SISO: - IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); - - /* Set up search table to try SISO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - - break; - case IWL_LEGACY_SWITCH_MIMO2_AB: - case IWL_LEGACY_SWITCH_MIMO2_AC: - case IWL_LEGACY_SWITCH_MIMO2_BC: - IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); - - /* Set up search table to try MIMO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!iwl4965_rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, - conf, sta, - search_tbl, index); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - break; - } - tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - - } - search_tbl->lq_type = LQ_NONE; - return 0; - -out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - return 0; - -} - -/* - * Try to switch to new modulation mode from SISO - */ -static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) -{ - u8 is_green = lq_sta->is_green; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = 
priv->hw_params.valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - u8 update_search_tbl_counter = 0; - int ret; - - start_action = tbl->action; - - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_SISO_SWITCH_ANTENNA1: - case IWL_SISO_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); - if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && - tx_chains_num <= 1) || - (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && - tx_chains_num <= 2)) - break; - - if (window->success_ratio >= IWL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (iwl4965_rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) { - update_search_tbl_counter = 1; - goto out; - } - break; - case IWL_SISO_SWITCH_MIMO2_AB: - case IWL_SISO_SWITCH_MIMO2_AC: - case IWL_SISO_SWITCH_MIMO2_BC: - IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!iwl4965_rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, - conf, sta, - search_tbl, index); - if (!ret) - goto out; - break; - case IWL_SISO_SWITCH_GI: - if (!tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - break; - - IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); - - memcpy(search_tbl, tbl, sz); - if (is_green) { - if (!tbl->is_SGI) - break; - else - IWL_ERR(priv, - "SGI was set in GF+SISO\n"); - } - search_tbl->is_SGI = !tbl->is_SGI; - iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[index]) - break; - } - search_tbl->current_rate = - iwl4965_rate_n_flags_from_tbl(priv, search_tbl, - index, is_green); - update_search_tbl_counter = 1; - goto out; - } - tbl->action++; - if (tbl->action > IWL_SISO_SWITCH_GI) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return 0; - - out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_SISO_SWITCH_GI) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - - return 0; -} - -/* - * Try to switch to new modulation mode from MIMO2 - */ -static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) -{ - s8 is_green = lq_sta->is_green; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = priv->hw_params.valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - u8 update_search_tbl_counter = 0; - int ret; - - start_action = tbl->action; - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_MIMO2_SWITCH_ANTENNA1: - case IWL_MIMO2_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); - - 
if (tx_chains_num <= 2) - break; - - if (window->success_ratio >= IWL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (iwl4965_rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) { - update_search_tbl_counter = 1; - goto out; - } - break; - case IWL_MIMO2_SWITCH_SISO_A: - case IWL_MIMO2_SWITCH_SISO_B: - case IWL_MIMO2_SWITCH_SISO_C: - IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); - - /* Set up new search table for SISO */ - memcpy(search_tbl, tbl, sz); - - if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) - search_tbl->ant_type = ANT_A; - else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) - search_tbl->ant_type = ANT_B; - else - search_tbl->ant_type = ANT_C; - - if (!iwl4965_rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = iwl4965_rs_switch_to_siso(priv, lq_sta, - conf, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - - case IWL_MIMO2_SWITCH_GI: - if (!tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - break; - - IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); - - /* Set up new search table for MIMO2 */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = !tbl->is_SGI; - iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); - /* - * If active table already uses the fastest possible - * modulation (dual stream with short guard interval), - * and it's working well, there's no need to look - * for a better type of modulation! - */ - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[index]) - break; - } - search_tbl->current_rate = - iwl4965_rate_n_flags_from_tbl(priv, search_tbl, - index, is_green); - update_search_tbl_counter = 1; - goto out; - - } - tbl->action++; - if (tbl->action > IWL_MIMO2_SWITCH_GI) - tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return 0; - out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_MIMO2_SWITCH_GI) - tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - - return 0; - -} - -/* - * Check whether we should continue using same modulation mode, or - * begin search for a new mode, based on: - * 1) # tx successes or failures while using this mode - * 2) # times calling this function - * 3) elapsed time in this mode (not used, for now) - */ -static void -iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) -{ - struct iwl_scale_tbl_info *tbl; - int i; - int active_tbl; - int flush_interval_passed = 0; - struct iwl_priv *priv; - - priv = lq_sta->drv; - active_tbl = lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - /* If we've been disallowing search, see if we should now allow it */ - if (lq_sta->stay_in_tbl) { - - /* Elapsed time using current modulation mode */ - if (lq_sta->flush_timer) - flush_interval_passed = - time_after(jiffies, - (unsigned long)(lq_sta->flush_timer + - IWL_RATE_SCALE_FLUSH_INTVL)); - - /* - * Check if we should allow search for new modulation mode. - * If many frames have failed or succeeded, or we've used - * this same modulation for a long time, allow search, and - * reset history stats that keep track of whether we should - * allow a new search. Also (below) reset all bitmaps and - * stats in active history. 
- */ - if (force_search || - (lq_sta->total_failed > lq_sta->max_failure_limit) || - (lq_sta->total_success > lq_sta->max_success_limit) || - ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) - && (flush_interval_passed))) { - IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", - lq_sta->total_failed, - lq_sta->total_success, - flush_interval_passed); - - /* Allow search for new mode */ - lq_sta->stay_in_tbl = 0; /* only place reset */ - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = 0; - - /* - * Else if we've used this modulation mode enough repetitions - * (regardless of elapsed time or success/failure), reset - * history bitmaps and rate-specific stats for all rates in - * active table. - */ - } else { - lq_sta->table_count++; - if (lq_sta->table_count >= - lq_sta->table_count_limit) { - lq_sta->table_count = 0; - - IWL_DEBUG_RATE(priv, - "LQ: stay in table clear win\n"); - for (i = 0; i < IWL_RATE_COUNT; i++) - iwl4965_rs_rate_scale_clear_window( - &(tbl->win[i])); - } - } - - /* If transitioning to allow "search", reset all history - * bitmaps and stats in active table (this will become the new - * "search" table). */ - if (!lq_sta->stay_in_tbl) { - for (i = 0; i < IWL_RATE_COUNT; i++) - iwl4965_rs_rate_scale_clear_window( - &(tbl->win[i])); - } - } -} - -/* - * setup rate table in uCode - * return rate_n_flags as used in the table - */ -static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - int index, u8 is_green) -{ - u32 rate; - - /* Update uCode's rate table. */ - rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green); - iwl4965_rs_fill_link_cmd(priv, lq_sta, rate); - iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); - - return rate; -} - -/* - * Do rate scaling and search for new modulation mode. - */ -static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, - struct sk_buff *skb, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) -{ - struct ieee80211_hw *hw = priv->hw; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int low = IWL_RATE_INVALID; - int high = IWL_RATE_INVALID; - int index; - int i; - struct iwl_rate_scale_data *window = NULL; - int current_tpt = IWL_INVALID_VALUE; - int low_tpt = IWL_INVALID_VALUE; - int high_tpt = IWL_INVALID_VALUE; - u32 fail_count; - s8 scale_action = 0; - u16 rate_mask; - u8 update_lq = 0; - struct iwl_scale_tbl_info *tbl, *tbl1; - u16 rate_scale_index_msk = 0; - u32 rate; - u8 is_green = 0; - u8 active_tbl = 0; - u8 done_search = 0; - u16 high_low; - s32 sr; - u8 tid = MAX_TID_COUNT; - struct iwl_tid_data *tid_data; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - - IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); - - /* Send management frames and NO_ACK data using lowest rate. */ - /* TODO: this could probably be improved.. 
*/ - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - - if (!sta || !lq_sta) - return; - - lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; - - tid = iwl4965_rs_tl_add_packet(lq_sta, hdr); - if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { - tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid]; - if (tid_data->agg.state == IWL_AGG_OFF) - lq_sta->is_agg = 0; - else - lq_sta->is_agg = 1; - } else - lq_sta->is_agg = 0; - - /* - * Select rate-scale / modulation-mode table to work with in - * the rest of this function: "search" if searching for better - * modulation mode, or "active" if doing rate scaling within a mode. - */ - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - if (is_legacy(tbl->lq_type)) - lq_sta->is_green = 0; - else - lq_sta->is_green = iwl4965_rs_use_green(sta); - is_green = lq_sta->is_green; - - /* current tx rate */ - index = lq_sta->last_txrate_idx; - - IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, - tbl->lq_type); - - /* rates available for this association, and for modulation mode */ - rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); - - IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); - - /* mask with station rate restriction */ - if (is_legacy(tbl->lq_type)) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) - /* supp_rates has no CCK bits in A mode */ - rate_scale_index_msk = (u16) (rate_mask & - (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); - else - rate_scale_index_msk = (u16) (rate_mask & - lq_sta->supp_rates); - - } else - rate_scale_index_msk = rate_mask; - - if (!rate_scale_index_msk) - rate_scale_index_msk = rate_mask; - - if (!((1 << index) & rate_scale_index_msk)) { - IWL_ERR(priv, "Current Rate is not valid\n"); - if (lq_sta->search_better_tbl) { - /* revert to active table if search table is not valid*/ - tbl->lq_type = LQ_NONE; - lq_sta->search_better_tbl = 0; - tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - /* get "active" rate info */ - index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); - rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, - tbl, index, is_green); - } - return; - } - - /* Get expected throughput table and history window for current rate */ - if (!tbl->expected_tpt) { - IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); - return; - } - - /* force user max rate if set by user */ - if ((lq_sta->max_rate_idx != -1) && - (lq_sta->max_rate_idx < index)) { - index = lq_sta->max_rate_idx; - update_lq = 1; - window = &(tbl->win[index]); - goto lq_update; - } - - window = &(tbl->win[index]); - - /* - * If there is not enough history to calculate actual average - * throughput, keep analyzing results of more tx frames, without - * changing rate or mode (bypass most of the rest of this function). - * Set up new rate table in uCode only if old rate is not supported - * in current association (use new rate found above). - */ - fail_count = window->counter - window->success_counter; - if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && - (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { - IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " - "for index %d\n", - window->success_counter, window->counter, index); - - /* Can't calculate this yet; not enough history */ - window->average_tpt = IWL_INVALID_VALUE; - - /* Should we stay with this modulation mode, - * or search for a new one? 
*/ - iwl4965_rs_stay_in_table(lq_sta, false); - - goto out; - } - /* Else we have enough samples; calculate estimate of - * actual average throughput */ - if (window->average_tpt != ((window->success_ratio * - tbl->expected_tpt[index] + 64) / 128)) { - IWL_ERR(priv, - "expected_tpt should have been calculated by now\n"); - window->average_tpt = ((window->success_ratio * - tbl->expected_tpt[index] + 64) / 128); - } - - /* If we are searching for better modulation mode, check success. */ - if (lq_sta->search_better_tbl) { - /* If good success, continue using the "search" mode; - * no need to send new link quality command, since we're - * continuing to use the setup that we've been trying. */ - if (window->average_tpt > lq_sta->last_tpt) { - - IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " - "suc=%d cur-tpt=%d old-tpt=%d\n", - window->success_ratio, - window->average_tpt, - lq_sta->last_tpt); - - if (!is_legacy(tbl->lq_type)) - lq_sta->enable_counter = 1; - - /* Swap tables; "search" becomes "active" */ - lq_sta->active_tbl = active_tbl; - current_tpt = window->average_tpt; - - /* Else poor success; go back to mode in "active" table */ - } else { - - IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " - "suc=%d cur-tpt=%d old-tpt=%d\n", - window->success_ratio, - window->average_tpt, - lq_sta->last_tpt); - - /* Nullify "search" table */ - tbl->lq_type = LQ_NONE; - - /* Revert to "active" table */ - active_tbl = lq_sta->active_tbl; - tbl = &(lq_sta->lq_info[active_tbl]); - - /* Revert to "active" rate and throughput info */ - index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); - current_tpt = lq_sta->last_tpt; - - /* Need to set up a new rate table in uCode */ - update_lq = 1; - } - - /* Either way, we've made a decision; modulation mode - * search is done, allow rate adjustment next time. */ - lq_sta->search_better_tbl = 0; - done_search = 1; /* Don't switch modes below! */ - goto lq_update; - } - - /* (Else) not in search of better modulation mode, try for better - * starting rate, while staying in this mode. */ - high_low = iwl4965_rs_get_adjacent_rate(priv, index, - rate_scale_index_msk, - tbl->lq_type); - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* If user set max rate, dont allow higher than user constrain */ - if ((lq_sta->max_rate_idx != -1) && - (lq_sta->max_rate_idx < high)) - high = IWL_RATE_INVALID; - - sr = window->success_ratio; - - /* Collect measured throughputs for current and adjacent rates */ - current_tpt = window->average_tpt; - if (low != IWL_RATE_INVALID) - low_tpt = tbl->win[low].average_tpt; - if (high != IWL_RATE_INVALID) - high_tpt = tbl->win[high].average_tpt; - - scale_action = 0; - - /* Too many failures, decrease rate */ - if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { - IWL_DEBUG_RATE(priv, - "decrease rate because of low success_ratio\n"); - scale_action = -1; - - /* No throughput measured yet for adjacent rates; try increase. */ - } else if ((low_tpt == IWL_INVALID_VALUE) && - (high_tpt == IWL_INVALID_VALUE)) { - - if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) - scale_action = 1; - else if (low != IWL_RATE_INVALID) - scale_action = 0; - } - - /* Both adjacent throughputs are measured, but neither one has better - * throughput; we're using the best rate, don't change it! 
*/ - else if ((low_tpt != IWL_INVALID_VALUE) && - (high_tpt != IWL_INVALID_VALUE) && - (low_tpt < current_tpt) && - (high_tpt < current_tpt)) - scale_action = 0; - - /* At least one adjacent rate's throughput is measured, - * and may have better performance. */ - else { - /* Higher adjacent rate's throughput is measured */ - if (high_tpt != IWL_INVALID_VALUE) { - /* Higher rate has better throughput */ - if (high_tpt > current_tpt && - sr >= IWL_RATE_INCREASE_TH) { - scale_action = 1; - } else { - scale_action = 0; - } - - /* Lower adjacent rate's throughput is measured */ - } else if (low_tpt != IWL_INVALID_VALUE) { - /* Lower rate has better throughput */ - if (low_tpt > current_tpt) { - IWL_DEBUG_RATE(priv, - "decrease rate because of low tpt\n"); - scale_action = -1; - } else if (sr >= IWL_RATE_INCREASE_TH) { - scale_action = 1; - } - } - } - - /* Sanity check; asked for decrease, but success rate or throughput - * has been good at old rate. Don't change it. */ - if ((scale_action == -1) && (low != IWL_RATE_INVALID) && - ((sr > IWL_RATE_HIGH_TH) || - (current_tpt > (100 * tbl->expected_tpt[low])))) - scale_action = 0; - - switch (scale_action) { - case -1: - /* Decrease starting rate, update uCode's rate table */ - if (low != IWL_RATE_INVALID) { - update_lq = 1; - index = low; - } - - break; - case 1: - /* Increase starting rate, update uCode's rate table */ - if (high != IWL_RATE_INVALID) { - update_lq = 1; - index = high; - } - - break; - case 0: - /* No change */ - default: - break; - } - - IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " - "high %d type %d\n", - index, scale_action, low, high, tbl->lq_type); - -lq_update: - /* Replace uCode's rate table for the destination station. */ - if (update_lq) - rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, - tbl, index, is_green); - - /* Should we stay with this modulation mode, - * or search for a new one? */ - iwl4965_rs_stay_in_table(lq_sta, false); - - /* - * Search for new modulation mode if we're: - * 1) Not changing rates right now - * 2) Not just finishing up a search - * 3) Allowing a new search - */ - if (!update_lq && !done_search && - !lq_sta->stay_in_tbl && window->counter) { - /* Save current throughput to compare with "search" throughput*/ - lq_sta->last_tpt = current_tpt; - - /* Select a new "search" modulation mode to try. - * If one is found, set up the new "search" table. */ - if (is_legacy(tbl->lq_type)) - iwl4965_rs_move_legacy_other(priv, lq_sta, - conf, sta, index); - else if (is_siso(tbl->lq_type)) - iwl4965_rs_move_siso_to_other(priv, lq_sta, - conf, sta, index); - else /* (is_mimo2(tbl->lq_type)) */ - iwl4965_rs_move_mimo2_to_other(priv, lq_sta, - conf, sta, index); - - /* If new "search" mode was selected, set up in uCode table */ - if (lq_sta->search_better_tbl) { - /* Access the "search" table, clear its history. 
*/ - tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - for (i = 0; i < IWL_RATE_COUNT; i++) - iwl4965_rs_rate_scale_clear_window( - &(tbl->win[i])); - - /* Use new "search" start rate */ - index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); - - IWL_DEBUG_RATE(priv, - "Switch current mcs: %X index: %d\n", - tbl->current_rate, index); - iwl4965_rs_fill_link_cmd(priv, lq_sta, - tbl->current_rate); - iwl_legacy_send_lq_cmd(priv, ctx, - &lq_sta->lq, CMD_ASYNC, false); - } else - done_search = 1; - } - - if (done_search && !lq_sta->stay_in_tbl) { - /* If the "active" (non-search) mode was legacy, - * and we've tried switching antennas, - * but we haven't been able to try HT modes (not available), - * stay with best antenna legacy modulation for a while - * before next round of mode comparisons. */ - tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && - lq_sta->action_counter > tbl1->max_search) { - IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); - iwl4965_rs_set_stay_in_table(priv, 1, lq_sta); - } - - /* If we're in an HT mode, and all 3 mode switch actions - * have been tried and compared, stay in this best modulation - * mode for a while before next round of mode comparisons. */ - if (lq_sta->enable_counter && - (lq_sta->action_counter >= tbl1->max_search)) { - if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && - (lq_sta->tx_agg_tid_en & (1 << tid)) && - (tid != MAX_TID_COUNT)) { - tid_data = - &priv->stations[lq_sta->lq.sta_id].tid[tid]; - if (tid_data->agg.state == IWL_AGG_OFF) { - IWL_DEBUG_RATE(priv, - "try to aggregate tid %d\n", - tid); - iwl4965_rs_tl_turn_on_agg(priv, tid, - lq_sta, sta); - } - } - iwl4965_rs_set_stay_in_table(priv, 0, lq_sta); - } - } - -out: - tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, - index, is_green); - i = index; - lq_sta->last_txrate_idx = i; -} - -/** - * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table - * - * The uCode's station table contains a table of fallback rates - * for automatic fallback during transmission. - * - * NOTE: This sets up a default set of values. These will be replaced later - * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of - * rc80211_simple. - * - * NOTE: Run REPLY_ADD_STA command to set up station table entry, before - * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, - * which requires station table entry to exist). 
- */ -static void iwl4965_rs_initialize_lq(struct iwl_priv *priv, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) -{ - struct iwl_scale_tbl_info *tbl; - int rate_idx; - int i; - u32 rate; - u8 use_green = iwl4965_rs_use_green(sta); - u8 active_tbl = 0; - u8 valid_tx_ant; - struct iwl_station_priv *sta_priv; - struct iwl_rxon_context *ctx; - - if (!sta || !lq_sta) - return; - - sta_priv = (void *)sta->drv_priv; - ctx = sta_priv->common.ctx; - - i = lq_sta->last_txrate_idx; - - valid_tx_ant = priv->hw_params.valid_tx_ant; - - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - if ((i < 0) || (i >= IWL_RATE_COUNT)) - i = 0; - - rate = iwlegacy_rates[i].plcp; - tbl->ant_type = iwl4965_first_antenna(valid_tx_ant); - rate |= tbl->ant_type << RATE_MCS_ANT_POS; - - if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) - rate |= RATE_MCS_CCK_MSK; - - iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); - if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) - iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl); - - rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); - tbl->current_rate = rate; - iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); - iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate); - priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; - iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true); -} - -static void -iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, - struct ieee80211_tx_rate_control *txrc) -{ - - struct sk_buff *skb = txrc->skb; - struct ieee80211_supported_band *sband = txrc->sband; - struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_lq_sta *lq_sta = priv_sta; - int rate_idx; - - IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); - - /* Get max rate if user set max rate */ - if (lq_sta) { - lq_sta->max_rate_idx = txrc->max_rate_idx; - if ((sband->band == IEEE80211_BAND_5GHZ) && - (lq_sta->max_rate_idx != -1)) - lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; - if ((lq_sta->max_rate_idx < 0) || - (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) - lq_sta->max_rate_idx = -1; - } - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (lq_sta && !lq_sta->drv) { - IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); - priv_sta = NULL; - } - - /* Send management frames and NO_ACK data using lowest rate. */ - if (rate_control_send_low(sta, priv_sta, txrc)) - return; - - if (!lq_sta) - return; - - rate_idx = lq_sta->last_txrate_idx; - - if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { - rate_idx -= IWL_FIRST_OFDM_RATE; - /* 6M and 9M shared same MCS index */ - rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; - if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >= - IWL_RATE_MIMO2_6M_PLCP) - rate_idx = rate_idx + MCS_INDEX_PER_STREAM; - info->control.rates[0].flags = IEEE80211_TX_RC_MCS; - if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_SHORT_GI; - if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_DUP_DATA; - if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_40_MHZ_WIDTH; - if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_GREEN_FIELD; - } else { - /* Check for invalid rates */ - if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || - ((sband->band == IEEE80211_BAND_5GHZ) && - (rate_idx < IWL_FIRST_OFDM_RATE))) - rate_idx = rate_lowest_index(sband, sta); - /* On valid 5 GHz rate, adjust index */ - else if (sband->band == IEEE80211_BAND_5GHZ) - rate_idx -= IWL_FIRST_OFDM_RATE; - info->control.rates[0].flags = 0; - } - info->control.rates[0].idx = rate_idx; - -} - -static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, - gfp_t gfp) -{ - struct iwl_lq_sta *lq_sta; - struct iwl_station_priv *sta_priv = - (struct iwl_station_priv *) sta->drv_priv; - struct iwl_priv *priv; - - priv = (struct iwl_priv *)priv_rate; - IWL_DEBUG_RATE(priv, "create station rate scale window\n"); - - lq_sta = &sta_priv->lq_sta; - - return lq_sta; -} - -/* - * Called after adding a new station to initialize rate scaling - */ -void -iwl4965_rs_rate_init(struct iwl_priv *priv, - struct ieee80211_sta *sta, - u8 sta_id) -{ - int i, j; - struct ieee80211_hw *hw = priv->hw; - struct ieee80211_conf *conf = &priv->hw->conf; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct iwl_station_priv *sta_priv; - struct iwl_lq_sta *lq_sta; - struct ieee80211_supported_band *sband; - - sta_priv = (struct iwl_station_priv *) sta->drv_priv; - lq_sta = &sta_priv->lq_sta; - sband = hw->wiphy->bands[conf->channel->band]; - - - lq_sta->lq.sta_id = sta_id; - - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < IWL_RATE_COUNT; i++) - iwl4965_rs_rate_scale_clear_window( - &lq_sta->lq_info[j].win[i]); - - lq_sta->flush_timer = 0; - lq_sta->supp_rates = sta->supp_rates[sband->band]; - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < IWL_RATE_COUNT; i++) - iwl4965_rs_rate_scale_clear_window( - &lq_sta->lq_info[j].win[i]); - - IWL_DEBUG_RATE(priv, "LQ:" - "*** rate scale station global init for station %d ***\n", - sta_id); - /* TODO: what is a good starting rate for STA? About middle? Maybe not - * the lowest or the highest rate.. Could consider using RSSI from - * previous packets? Need to have IEEE 802.1X auth succeed immediately - * after assoc.. */ - - lq_sta->is_dup = 0; - lq_sta->max_rate_idx = -1; - lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; - lq_sta->is_green = iwl4965_rs_use_green(sta); - lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); - lq_sta->band = priv->band; - /* - * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), - * supp_rates[] does not; shift to convert format, force 9 MBits off. 
- */ - lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; - lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; - lq_sta->active_siso_rate &= ~((u16)0x2); - lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; - - /* Same here */ - lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; - lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; - lq_sta->active_mimo2_rate &= ~((u16)0x2); - lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; - - /* These values will be overridden later */ - lq_sta->lq.general_params.single_stream_ant_msk = - iwl4965_first_antenna(priv->hw_params.valid_tx_ant); - lq_sta->lq.general_params.dual_stream_ant_msk = - priv->hw_params.valid_tx_ant & - ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); - if (!lq_sta->lq.general_params.dual_stream_ant_msk) { - lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; - } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { - lq_sta->lq.general_params.dual_stream_ant_msk = - priv->hw_params.valid_tx_ant; - } - - /* as default allow aggregation for all tids */ - lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; - lq_sta->drv = priv; - - /* Set last_txrate_idx to lowest rate */ - lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) - lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; - lq_sta->is_agg = 0; - -#ifdef CONFIG_MAC80211_DEBUGFS - lq_sta->dbg_fixed_rate = 0; -#endif - - iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta); -} - -static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, u32 new_rate) -{ - struct iwl_scale_tbl_info tbl_type; - int index = 0; - int rate_idx; - int repeat_rate = 0; - u8 ant_toggle_cnt = 0; - u8 use_ht_possible = 1; - u8 valid_tx_ant = 0; - struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; - - /* Override starting rate (index 0) if needed for debug purposes */ - iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - - /* Interpret new_rate (rate_n_flags) */ - iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, - &tbl_type, &rate_idx); - - /* How many times should we repeat the initial rate? */ - if (is_legacy(tbl_type.lq_type)) { - ant_toggle_cnt = 1; - repeat_rate = IWL_NUMBER_TRY; - } else { - repeat_rate = IWL_HT_NUMBER_TRY; - } - - lq_cmd->general_params.mimo_delimiter = - is_mimo(tbl_type.lq_type) ? 1 : 0; - - /* Fill 1st table entry (index 0) */ - lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); - - if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) { - lq_cmd->general_params.single_stream_ant_msk = - tbl_type.ant_type; - } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) { - lq_cmd->general_params.dual_stream_ant_msk = - tbl_type.ant_type; - } /* otherwise we don't modify the existing value */ - - index++; - repeat_rate--; - if (priv) - valid_tx_ant = priv->hw_params.valid_tx_ant; - - /* Fill rest of rate table */ - while (index < LINK_QUAL_MAX_RETRY_NUM) { - /* Repeat initial/next rate. - * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. - * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. 
*/ - while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; - else if (priv && - iwl4965_rs_toggle_antenna(valid_tx_ant, - &new_rate, &tbl_type)) - ant_toggle_cnt = 1; - } - - /* Override next rate if needed for debug purposes */ - iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - - /* Fill next table entry */ - lq_cmd->rs_table[index].rate_n_flags = - cpu_to_le32(new_rate); - repeat_rate--; - index++; - } - - iwl4965_rs_get_tbl_info_from_mcs(new_rate, - lq_sta->band, &tbl_type, - &rate_idx); - - /* Indicate to uCode which entries might be MIMO. - * If initial rate was MIMO, this will finally end up - * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ - if (is_mimo(tbl_type.lq_type)) - lq_cmd->general_params.mimo_delimiter = index; - - /* Get next rate */ - new_rate = iwl4965_rs_get_lower_rate(lq_sta, - &tbl_type, rate_idx, - use_ht_possible); - - /* How many times should we repeat the next rate? */ - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; - else if (priv && - iwl4965_rs_toggle_antenna(valid_tx_ant, - &new_rate, &tbl_type)) - ant_toggle_cnt = 1; - - repeat_rate = IWL_NUMBER_TRY; - } else { - repeat_rate = IWL_HT_NUMBER_TRY; - } - - /* Don't allow HT rates after next pass. - * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */ - use_ht_possible = 0; - - /* Override next rate if needed for debug purposes */ - iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - - /* Fill next table entry */ - lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); - - index++; - repeat_rate--; - } - - lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; - - lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); -} - -static void -*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - return hw->priv; -} -/* rate scale requires free function to be implemented */ -static void iwl4965_rs_free(void *priv_rate) -{ - return; -} - -static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta, - void *priv_sta) -{ - struct iwl_priv *priv __maybe_unused = priv_r; - - IWL_DEBUG_RATE(priv, "enter\n"); - IWL_DEBUG_RATE(priv, "leave\n"); -} - - -#ifdef CONFIG_MAC80211_DEBUGFS -static int iwl4965_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} -static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index) -{ - struct iwl_priv *priv; - u8 valid_tx_ant; - u8 ant_sel_tx; - - priv = lq_sta->drv; - valid_tx_ant = priv->hw_params.valid_tx_ant; - if (lq_sta->dbg_fixed_rate) { - ant_sel_tx = - ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) - >> RATE_MCS_ANT_POS); - if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { - *rate_n_flags = lq_sta->dbg_fixed_rate; - IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); - } else { - lq_sta->dbg_fixed_rate = 0; - IWL_ERR(priv, - "Invalid antenna selection 0x%X, Valid is 0x%X\n", - ant_sel_tx, valid_tx_ant); - IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); - } - } else { - IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); - } -} - -static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_priv *priv; - char buf[64]; - 
size_t buf_size; - u32 parsed_rate; - struct iwl_station_priv *sta_priv = - container_of(lq_sta, struct iwl_station_priv, lq_sta); - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - - priv = lq_sta->drv; - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x", &parsed_rate) == 1) - lq_sta->dbg_fixed_rate = parsed_rate; - else - lq_sta->dbg_fixed_rate = 0; - - lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ - lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - - IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", - lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); - - if (lq_sta->dbg_fixed_rate) { - iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); - iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, - false); - } - - return count; -} - -static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i = 0; - int index = 0; - ssize_t ret; - - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_priv *priv; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - - priv = lq_sta->drv; - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); - desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", - lq_sta->total_failed, lq_sta->total_success, - lq_sta->active_legacy_rate); - desc += sprintf(buff+desc, "fixed rate 0x%X\n", - lq_sta->dbg_fixed_rate); - desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", - (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", - (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", - (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); - desc += sprintf(buff+desc, "lq type %s\n", - (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); - if (is_Ht(tbl->lq_type)) { - desc += sprintf(buff+desc, " %s", - (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); - desc += sprintf(buff+desc, " %s", - (tbl->is_ht40) ? "40MHz" : "20MHz"); - desc += sprintf(buff+desc, " %s %s %s\n", - (tbl->is_SGI) ? "SGI" : "", - (lq_sta->is_green) ? "GF enabled" : "", - (lq_sta->is_agg) ? 
"AGG on" : ""); - } - desc += sprintf(buff+desc, "last tx rate=0x%X\n", - lq_sta->last_rate_n_flags); - desc += sprintf(buff+desc, "general:" - "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", - lq_sta->lq.general_params.flags, - lq_sta->lq.general_params.mimo_delimiter, - lq_sta->lq.general_params.single_stream_ant_msk, - lq_sta->lq.general_params.dual_stream_ant_msk); - - desc += sprintf(buff+desc, "agg:" - "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", - le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), - lq_sta->lq.agg_params.agg_dis_start_th, - lq_sta->lq.agg_params.agg_frame_cnt_limit); - - desc += sprintf(buff+desc, - "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", - lq_sta->lq.general_params.start_rate_index[0], - lq_sta->lq.general_params.start_rate_index[1], - lq_sta->lq.general_params.start_rate_index[2], - lq_sta->lq.general_params.start_rate_index[3]); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - index = iwl4965_hwrate_to_plcp_idx( - le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); - if (is_legacy(tbl->lq_type)) { - desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", - i, - le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), - iwl_rate_mcs[index].mbps); - } else { - desc += sprintf(buff+desc, - " rate[%d] 0x%X %smbps (%s)\n", - i, - le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), - iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); - } - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_scale_table_ops = { - .write = iwl4965_rs_sta_dbgfs_scale_table_write, - .read = iwl4965_rs_sta_dbgfs_scale_table_read, - .open = iwl4965_open_file_generic, - .llseek = default_llseek, -}; -static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i, j; - ssize_t ret; - - struct iwl_lq_sta *lq_sta = file->private_data; - - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - for (i = 0; i < LQ_SIZE; i++) { - desc += sprintf(buff+desc, - "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" - "rate=0x%X\n", - lq_sta->active_tbl == i ? 
"*" : "x", - lq_sta->lq_info[i].lq_type, - lq_sta->lq_info[i].is_SGI, - lq_sta->lq_info[i].is_ht40, - lq_sta->lq_info[i].is_dup, - lq_sta->is_green, - lq_sta->lq_info[i].current_rate); - for (j = 0; j < IWL_RATE_COUNT; j++) { - desc += sprintf(buff+desc, - "counter=%d success=%d %%=%d\n", - lq_sta->lq_info[i].win[j].counter, - lq_sta->lq_info[i].win[j].success_counter, - lq_sta->lq_info[i].win[j].success_ratio); - } - } - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_stats_table_ops = { - .read = iwl4965_rs_sta_dbgfs_stats_table_read, - .open = iwl4965_open_file_generic, - .llseek = default_llseek, -}; - -static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char buff[120]; - int desc = 0; - ssize_t ret; - - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_priv *priv; - struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; - - priv = lq_sta->drv; - - if (is_Ht(tbl->lq_type)) - desc += sprintf(buff+desc, - "Bit Rate= %d Mb/s\n", - tbl->expected_tpt[lq_sta->last_txrate_idx]); - else - desc += sprintf(buff+desc, - "Bit Rate= %d Mb/s\n", - iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1); - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { - .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read, - .open = iwl4965_open_file_generic, - .llseek = default_llseek, -}; - -static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta, - struct dentry *dir) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); - lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); - lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, - &lq_sta->tx_agg_tid_en); - -} - -static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); -} -#endif - -/* - * Initialization of rate scaling information is done by driver after - * the station is added. Since mac80211 calls this function before a - * station is added we ignore it. 
- */ -static void -iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta) -{ -} -static struct rate_control_ops rs_4965_ops = { - .module = NULL, - .name = IWL4965_RS_NAME, - .tx_status = iwl4965_rs_tx_status, - .get_rate = iwl4965_rs_get_rate, - .rate_init = iwl4965_rs_rate_init_stub, - .alloc = iwl4965_rs_alloc, - .free = iwl4965_rs_free, - .alloc_sta = iwl4965_rs_alloc_sta, - .free_sta = iwl4965_rs_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = iwl4965_rs_add_debugfs, - .remove_sta_debugfs = iwl4965_rs_remove_debugfs, -#endif -}; - -int iwl4965_rate_control_register(void) -{ - return ieee80211_rate_control_register(&rs_4965_ops); -} - -void iwl4965_rate_control_unregister(void) -{ - ieee80211_rate_control_unregister(&rs_4965_ops); -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c deleted file mode 100644 index 2b144bbfc3c..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c +++ /dev/null @@ -1,215 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/sched.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-4965-calib.h" -#include "iwl-sta.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -#include "iwl-4965-hw.h" -#include "iwl-4965.h" - -void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) - -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacon_notif *missed_beacon; - - missed_beacon = &pkt->u.missed_beacon; - if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > - priv->missed_beacon_threshold) { - IWL_DEBUG_CALIB(priv, - "missed bcn cnsq %d totl %d rcd %d expctd %d\n", - le32_to_cpu(missed_beacon->consecutive_missed_beacons), - le32_to_cpu(missed_beacon->total_missed_becons), - le32_to_cpu(missed_beacon->num_recvd_beacons), - le32_to_cpu(missed_beacon->num_expected_beacons)); - if (!test_bit(STATUS_SCANNING, &priv->status)) - iwl4965_init_sensitivity(priv); - } -} - -/* Calculate noise level, based on measurements during network silence just - * before arriving beacon. This measurement can be done only if we know - * exactly when to expect beacons, therefore only when we're associated. 
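For a concrete picture of the calculation that follows: with invented beacon-silence readings of 34, 30 and 0 units on receive chains A, B and C, only the two non-zero chains count as active, so the estimate is (34 + 30) / 2 - 107 = -75 dBm; when no chain reports silence data the code falls back to IWL_NOISE_MEAS_NOT_AVAILABLE rather than dividing by zero.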
*/ -static void iwl4965_rx_calc_noise(struct iwl_priv *priv) -{ - struct statistics_rx_non_phy *rx_info; - int num_active_rx = 0; - int total_silence = 0; - int bcn_silence_a, bcn_silence_b, bcn_silence_c; - int last_rx_noise; - - rx_info = &(priv->_4965.statistics.rx.general); - bcn_silence_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; - bcn_silence_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; - bcn_silence_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; - - if (bcn_silence_a) { - total_silence += bcn_silence_a; - num_active_rx++; - } - if (bcn_silence_b) { - total_silence += bcn_silence_b; - num_active_rx++; - } - if (bcn_silence_c) { - total_silence += bcn_silence_c; - num_active_rx++; - } - - /* Average among active antennas */ - if (num_active_rx) - last_rx_noise = (total_silence / num_active_rx) - 107; - else - last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; - - IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", - bcn_silence_a, bcn_silence_b, bcn_silence_c, - last_rx_noise); -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS -/* - * based on the assumption of all statistics counter are in DWORD - * FIXME: This function is for debugging, do not deal with - * the case of counters roll-over. - */ -static void iwl4965_accumulative_statistics(struct iwl_priv *priv, - __le32 *stats) -{ - int i, size; - __le32 *prev_stats; - u32 *accum_stats; - u32 *delta, *max_delta; - struct statistics_general_common *general, *accum_general; - struct statistics_tx *tx, *accum_tx; - - prev_stats = (__le32 *)&priv->_4965.statistics; - accum_stats = (u32 *)&priv->_4965.accum_statistics; - size = sizeof(struct iwl_notif_statistics); - general = &priv->_4965.statistics.general.common; - accum_general = &priv->_4965.accum_statistics.general.common; - tx = &priv->_4965.statistics.tx; - accum_tx = &priv->_4965.accum_statistics.tx; - delta = (u32 *)&priv->_4965.delta_statistics; - max_delta = (u32 *)&priv->_4965.max_delta; - - for (i = sizeof(__le32); i < size; - i += sizeof(__le32), stats++, prev_stats++, delta++, - max_delta++, accum_stats++) { - if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { - *delta = (le32_to_cpu(*stats) - - le32_to_cpu(*prev_stats)); - *accum_stats += *delta; - if (*delta > *max_delta) - *max_delta = *delta; - } - } - - /* reset accumulative statistics for "no-counter" type statistics */ - accum_general->temperature = general->temperature; - accum_general->ttl_timestamp = general->ttl_timestamp; -} -#endif - -#define REG_RECALIB_PERIOD (60) - -void iwl4965_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - int change; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - IWL_DEBUG_RX(priv, - "Statistics notification received (%d vs %d).\n", - (int)sizeof(struct iwl_notif_statistics), - le32_to_cpu(pkt->len_n_flags) & - FH_RSCSR_FRAME_SIZE_MSK); - - change = ((priv->_4965.statistics.general.common.temperature != - pkt->u.stats.general.common.temperature) || - ((priv->_4965.statistics.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK) != - (pkt->u.stats.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK))); -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); -#endif - - /* TODO: reading some of statistics is unneeded */ - memcpy(&priv->_4965.statistics, &pkt->u.stats, - sizeof(priv->_4965.statistics)); - - set_bit(STATUS_STATISTICS, &priv->status); - - /* Reschedule the statistics timer to occur in - * REG_RECALIB_PERIOD seconds to ensure we get a - * 
thermal update even if the uCode doesn't give - * us one */ - mod_timer(&priv->statistics_periodic, jiffies + - msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); - - if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && - (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { - iwl4965_rx_calc_noise(priv); - queue_work(priv->workqueue, &priv->run_time_calib_work); - } - if (priv->cfg->ops->lib->temp_ops.temperature && change) - priv->cfg->ops->lib->temp_ops.temperature(priv); -} - -void iwl4965_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - memset(&priv->_4965.accum_statistics, 0, - sizeof(struct iwl_notif_statistics)); - memset(&priv->_4965.delta_statistics, 0, - sizeof(struct iwl_notif_statistics)); - memset(&priv->_4965.max_delta, 0, - sizeof(struct iwl_notif_statistics)); -#endif - IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); - } - iwl4965_rx_statistics(priv, rxb); -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c deleted file mode 100644 index a262c23553d..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c +++ /dev/null @@ -1,721 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <net/mac80211.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-sta.h" -#include "iwl-4965.h" - -static struct iwl_link_quality_cmd * -iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) -{ - int i, r; - struct iwl_link_quality_cmd *link_cmd; - u32 rate_flags = 0; - __le32 rate_n_flags; - - link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); - if (!link_cmd) { - IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); - return NULL; - } - /* Set up the rate scaling to start at selected rate, fall back - * all the way down to 1M in IEEE order, and then spin on 1M */ - if (priv->band == IEEE80211_BAND_5GHZ) - r = IWL_RATE_6M_INDEX; - else - r = IWL_RATE_1M_INDEX; - - if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; - - rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) << - RATE_MCS_ANT_POS; - rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp, - rate_flags); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - link_cmd->rs_table[i].rate_n_flags = rate_n_flags; - - link_cmd->general_params.single_stream_ant_msk = - iwl4965_first_antenna(priv->hw_params.valid_tx_ant); - - link_cmd->general_params.dual_stream_ant_msk = - priv->hw_params.valid_tx_ant & - ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); - if (!link_cmd->general_params.dual_stream_ant_msk) { - link_cmd->general_params.dual_stream_ant_msk = ANT_AB; - } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { - link_cmd->general_params.dual_stream_ant_msk = - priv->hw_params.valid_tx_ant; - } - - link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; - link_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); - - link_cmd->sta_id = sta_id; - - return link_cmd; -} - -/* - * iwl4965_add_bssid_station - Add the special IBSS BSSID station - * - * Function sleeps. 
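To make the antenna-mask bookkeeping in iwl4965_sta_alloc_lq() above concrete (assuming the usual ANT_A = 0x1, ANT_B = 0x2 bit assignments): with valid_tx_ant = ANT_A | ANT_B the first antenna is A, the dual-stream mask initially computes to ANT_B alone, and the two-antenna branch then widens it back to ANT_A | ANT_B; with only ANT_A valid the mask computes to 0 and the fallback assigns ANT_AB.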
- */ -int -iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - const u8 *addr, u8 *sta_id_r) -{ - int ret; - u8 sta_id; - struct iwl_link_quality_cmd *link_cmd; - unsigned long flags; - - if (sta_id_r) - *sta_id_r = IWL_INVALID_STATION; - - ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM\n", addr); - return ret; - } - - if (sta_id_r) - *sta_id_r = sta_id; - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].used |= IWL_STA_LOCAL; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - /* Set up default rate scaling table in device's station table */ - link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); - if (!link_cmd) { - IWL_ERR(priv, - "Unable to initialize rate scaling for station %pM.\n", - addr); - return -ENOMEM; - } - - ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); - if (ret) - IWL_ERR(priv, "Link quality command failed (%d)\n", ret); - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].lq = link_cmd; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return 0; -} - -static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - bool send_if_empty) -{ - int i, not_empty = 0; - u8 buff[sizeof(struct iwl_wep_cmd) + - sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; - struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; - size_t cmd_size = sizeof(struct iwl_wep_cmd); - struct iwl_host_cmd cmd = { - .id = ctx->wep_key_cmd, - .data = wep_cmd, - .flags = CMD_SYNC, - }; - - might_sleep(); - - memset(wep_cmd, 0, cmd_size + - (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); - - for (i = 0; i < WEP_KEYS_MAX ; i++) { - wep_cmd->key[i].key_index = i; - if (ctx->wep_keys[i].key_size) { - wep_cmd->key[i].key_offset = i; - not_empty = 1; - } else { - wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; - } - - wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; - memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, - ctx->wep_keys[i].key_size); - } - - wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; - wep_cmd->num_keys = WEP_KEYS_MAX; - - cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; - - cmd.len = cmd_size; - - if (not_empty || send_if_empty) - return iwl_legacy_send_cmd(priv, &cmd); - else - return 0; -} - -int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - lockdep_assert_held(&priv->mutex); - - return iwl4965_static_wepkey_cmd(priv, ctx, false); -} - -int iwl4965_remove_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", - keyconf->keyidx); - - memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); - if (iwl_legacy_is_rfkill(priv)) { - IWL_DEBUG_WEP(priv, - "Not sending REPLY_WEPKEY command due to RFKILL.\n"); - /* but keys in device are clear anyway so return success */ - return 0; - } - ret = iwl4965_static_wepkey_cmd(priv, ctx, 1); - IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", - keyconf->keyidx, ret); - - return ret; -} - -int iwl4965_set_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - if (keyconf->keylen != WEP_KEY_LEN_128 && - keyconf->keylen != WEP_KEY_LEN_64) { - IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen); - 
return -EINVAL; - } - - keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->hw_key_idx = HW_KEY_DEFAULT; - priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; - - ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; - memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, - keyconf->keylen); - - ret = iwl4965_static_wepkey_cmd(priv, ctx, false); - IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", - keyconf->keylen, keyconf->keyidx, ret); - - return ret; -} - -static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - struct iwl_legacy_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; - - key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (keyconf->keylen == WEP_KEY_LEN_128) - key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - spin_lock_irqsave(&priv->sta_lock, flags); - - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; - priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; - - memcpy(priv->stations[sta_id].keyinfo.key, - keyconf->key, keyconf->keylen); - - memcpy(&priv->stations[sta_id].sta.key.key[3], - keyconf->key, keyconf->keylen); - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_legacy_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - priv->stations[sta_id].sta.key.key_flags = key_flags; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - struct iwl_legacy_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; - - memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, - keyconf->keylen); - - memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, - keyconf->keylen); - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_legacy_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. 
*/ - - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - priv->stations[sta_id].sta.key.key_flags = key_flags; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - int ret = 0; - __le16 key_flags = 0; - - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - - spin_lock_irqsave(&priv->sta_lock, flags); - - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = 16; - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_legacy_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - priv->stations[sta_id].sta.key.key_flags = key_flags; - - - /* This copy is acutally not needed: we get the key with each TX */ - memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); - - memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); - - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return ret; -} - -void iwl4965_update_tkip_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) -{ - u8 sta_id; - unsigned long flags; - int i; - - if (iwl_legacy_scan_cancel(priv)) { - /* cancel scan failed, just live w/ bad key and rely - briefly on SW decryption */ - return; - } - - sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta); - if (sta_id == IWL_INVALID_STATION) - return; - - spin_lock_irqsave(&priv->sta_lock, flags); - - priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; - - for (i = 0; i < 5; i++) - priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = - cpu_to_le16(phase1key[i]); - - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); - - spin_unlock_irqrestore(&priv->sta_lock, flags); - -} - -int iwl4965_remove_dynamic_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - u16 key_flags; - u8 keyidx; - struct iwl_legacy_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - ctx->key_mapping_keys--; - - spin_lock_irqsave(&priv->sta_lock, flags); - key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); - keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; - - IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", - keyconf->keyidx, sta_id); - - if (keyconf->keyidx != keyidx) { - /* We need to remove a key with index different 
that the one - * in the uCode. This means that the key we need to remove has - * been replaced by another one with different index. - * Don't do anything and return ok - */ - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - - if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { - IWL_WARN(priv, "Removing wrong key %d 0x%x\n", - keyconf->keyidx, key_flags); - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - - if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, - &priv->ucode_key_table)) - IWL_ERR(priv, "index %d not used in uCode key table.\n", - priv->stations[sta_id].sta.key.key_offset); - memset(&priv->stations[sta_id].keyinfo, 0, - sizeof(struct iwl_hw_key)); - memset(&priv->stations[sta_id].sta.key, 0, - sizeof(struct iwl4965_keyinfo)); - priv->stations[sta_id].sta.key.key_flags = - STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; - priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - if (iwl_legacy_is_rfkill(priv)) { - IWL_DEBUG_WEP(priv, - "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - ctx->key_mapping_keys++; - keyconf->hw_key_idx = HW_KEY_DYNAMIC; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx, - keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_TKIP: - ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx, - keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - ret = iwl4965_set_wep_dynamic_key_info(priv, ctx, - keyconf, sta_id); - break; - default: - IWL_ERR(priv, - "Unknown alg: %s cipher = %x\n", __func__, - keyconf->cipher); - ret = -EINVAL; - } - - IWL_DEBUG_WEP(priv, - "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", - keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta_id, ret); - - return ret; -} - -/** - * iwl4965_alloc_bcast_station - add broadcast station into driver's station table. - * - * This adds the broadcast station into the driver's station table - * and marks it driver active, so that it will be restored to the - * device at the next best time. 
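The key and station updates in this file share one locking idiom: mutate the shared priv->stations[] entry and snapshot it while holding sta_lock, then issue the ADD_STA command outside the spinlock, since a CMD_SYNC send may sleep. A condensed sketch of that pattern using the driver's own types; the function name and the single field written here are illustrative only:

static int example_modify_station(struct iwl_priv *priv, u8 sta_id)
{
	struct iwl_legacy_addsta_cmd sta_cmd;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* the snapshot lets the possibly-sleeping send run unlocked */
	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}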
- */ -int iwl4965_alloc_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct iwl_link_quality_cmd *link_cmd; - unsigned long flags; - u8 sta_id; - - spin_lock_irqsave(&priv->sta_lock, flags); - sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr, - false, NULL); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Unable to prepare broadcast station\n"); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return -EINVAL; - } - - priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; - priv->stations[sta_id].used |= IWL_STA_BCAST; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); - if (!link_cmd) { - IWL_ERR(priv, - "Unable to initialize rate scaling for bcast station.\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].lq = link_cmd; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return 0; -} - -/** - * iwl4965_update_bcast_station - update broadcast station's LQ command - * - * Only used by iwl4965. Placed here to have all bcast station management - * code together. - */ -static int iwl4965_update_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - unsigned long flags; - struct iwl_link_quality_cmd *link_cmd; - u8 sta_id = ctx->bcast_sta_id; - - link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); - if (!link_cmd) { - IWL_ERR(priv, - "Unable to initialize rate scaling for bcast station.\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&priv->sta_lock, flags); - if (priv->stations[sta_id].lq) - kfree(priv->stations[sta_id].lq); - else - IWL_DEBUG_INFO(priv, - "Bcast station rate scaling has not been initialized yet.\n"); - priv->stations[sta_id].lq = link_cmd; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return 0; -} - -int iwl4965_update_bcast_stations(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - int ret = 0; - - for_each_context(priv, ctx) { - ret = iwl4965_update_bcast_station(priv, ctx); - if (ret) - break; - } - - return ret; -} - -/** - * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table - */ -int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) -{ - unsigned long flags; - struct iwl_legacy_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - /* Remove "disable" flag, to enable Tx for this TID */ - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; - priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid, u16 ssn) -{ - unsigned long flags; - int sta_id; - struct iwl_legacy_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - sta_id = iwl_legacy_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) - return -ENXIO; - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].sta.station_flags_msk = 0; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; - priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; - priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, 
&priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid) -{ - unsigned long flags; - int sta_id; - struct iwl_legacy_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - sta_id = iwl_legacy_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].sta.station_flags_msk = 0; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; - priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -void -iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; - priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; - priv->stations[sta_id].sta.sta.modify_mask = - STA_MODIFY_SLEEP_TX_COUNT_MSK; - priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - iwl_legacy_send_add_sta(priv, - &priv->stations[sta_id].sta, CMD_ASYNC); - spin_unlock_irqrestore(&priv->sta_lock, flags); - -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c deleted file mode 100644 index 7f12e3638ba..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c +++ /dev/null @@ -1,1378 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/sched.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-sta.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -#include "iwl-4965-hw.h" -#include "iwl-4965.h" - -/* - * mac80211 queues, ACs, hardware queues, FIFOs. - * - * Cf. 
http://wireless.kernel.org/en/developers/Documentation/mac80211/queues - * - * Mac80211 uses the following numbers, which we get as from it - * by way of skb_get_queue_mapping(skb): - * - * VO 0 - * VI 1 - * BE 2 - * BK 3 - * - * - * Regular (not A-MPDU) frames are put into hardware queues corresponding - * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their - * own queue per aggregation session (RA/TID combination), such queues are - * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In - * order to map frames to the right queue, we also need an AC->hw queue - * mapping. This is implemented here. - * - * Due to the way hw queues are set up (by the hw specific modules like - * iwl-4965.c), the AC->hw queue mapping is the identity - * mapping. - */ - -static const u8 tid_to_ac[] = { - IEEE80211_AC_BE, - IEEE80211_AC_BK, - IEEE80211_AC_BK, - IEEE80211_AC_BE, - IEEE80211_AC_VI, - IEEE80211_AC_VI, - IEEE80211_AC_VO, - IEEE80211_AC_VO -}; - -static inline int iwl4965_get_ac_from_tid(u16 tid) -{ - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return tid_to_ac[tid]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; -} - -static inline int -iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid) -{ - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return ctx->ac_to_fifo[tid_to_ac[tid]]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; -} - -/* - * handle build REPLY_TX command notification. - */ -static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv, - struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, - u8 std_id) -{ - __le16 fc = hdr->frame_control; - __le32 tx_flags = tx_cmd->tx_flags; - - tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - tx_flags |= TX_CMD_FLG_ACK_MSK; - if (ieee80211_is_mgmt(fc)) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - if (ieee80211_is_probe_resp(fc) && - !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) - tx_flags |= TX_CMD_FLG_TSF_MSK; - } else { - tx_flags &= (~TX_CMD_FLG_ACK_MSK); - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - if (ieee80211_is_back_req(fc)) - tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; - - tx_cmd->sta_id = std_id; - if (ieee80211_has_morefrags(fc)) - tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tx_cmd->tid_tspec = qc[0] & 0xf; - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; - } else { - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); - - tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); - if (ieee80211_is_mgmt(fc)) { - if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); - else - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); - } else { - tx_cmd->timeout.pm_frame_timeout = 0; - } - - tx_cmd->driver_txop = 0; - tx_cmd->tx_flags = tx_flags; - tx_cmd->next_frame_len = 0; -} - -#define RTS_DFAULT_RETRY_LIMIT 60 - -static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - __le16 fc) -{ - u32 rate_flags; - int rate_idx; - u8 rts_retry_limit; - u8 data_retry_limit; - u8 rate_plcp; - - /* Set retry limit on DATA packets and Probe Responses*/ - if (ieee80211_is_probe_resp(fc)) - data_retry_limit = 3; - else - data_retry_limit = IWL4965_DEFAULT_TX_RETRY; - tx_cmd->data_retry_limit = data_retry_limit; - - /* Set retry limit on RTS packets */ - 
rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; - if (data_retry_limit < rts_retry_limit) - rts_retry_limit = data_retry_limit; - tx_cmd->rts_retry_limit = rts_retry_limit; - - /* DATA packets will use the uCode station table for rate/antenna - * selection */ - if (ieee80211_is_data(fc)) { - tx_cmd->initial_rate_index = 0; - tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; - return; - } - - /** - * If the current TX rate stored in mac80211 has the MCS bit set, it's - * not really a TX rate. Thus, we use the lowest supported rate for - * this band. Also use the lowest supported rate if the stored rate - * index is invalid. - */ - rate_idx = info->control.rates[0].idx; - if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || - (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) - rate_idx = rate_lowest_index(&priv->bands[info->band], - info->control.sta); - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwlegacy_rates[rate_idx].plcp; - /* Zero out flags for this packet */ - rate_flags = 0; - - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - - /* Set up antennas */ - priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, - priv->hw_params.valid_tx_ant); - - rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); - - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags); -} - -static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag, - int sta_id) -{ - struct ieee80211_key_conf *keyconf = info->control.hw_key; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; - IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); - break; - - case WLAN_CIPHER_SUITE_TKIP: - tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; - ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); - IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); - break; - - case WLAN_CIPHER_SUITE_WEP104: - tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | - (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); - - memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); - - IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " - "with key %d\n", keyconf->keyidx); - break; - - default: - IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher); - break; - } -} - -/* - * start REPLY_TX command process - */ -int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; - struct iwl_station_priv *sta_priv = NULL; - struct iwl_tx_queue *txq; - struct iwl_queue *q; - struct iwl_device_cmd *out_cmd; - struct iwl_cmd_meta *out_meta; - struct iwl_tx_cmd *tx_cmd; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - int txq_id; - dma_addr_t phys_addr; - dma_addr_t txcmd_phys; - dma_addr_t scratch_phys; - u16 len, firstlen, secondlen; - u16 seq_number = 0; - __le16 fc; - u8 hdr_len; - u8 
sta_id; - u8 wait_write_ptr = 0; - u8 tid = 0; - u8 *qc = NULL; - unsigned long flags; - bool is_agg = false; - - if (info->control.vif) - ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif); - - spin_lock_irqsave(&priv->lock, flags); - if (iwl_legacy_is_rfkill(priv)) { - IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); - goto drop_unlock; - } - - fc = hdr->frame_control; - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (ieee80211_is_auth(fc)) - IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); - else if (ieee80211_is_assoc_req(fc)) - IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); - else if (ieee80211_is_reassoc_req(fc)) - IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); -#endif - - hdr_len = ieee80211_hdrlen(fc); - - /* For management frames use broadcast id to do not break aggregation */ - if (!ieee80211_is_data(fc)) - sta_id = ctx->bcast_sta_id; - else { - /* Find index into station table for destination station */ - sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); - - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", - hdr->addr1); - goto drop_unlock; - } - } - - IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); - - if (sta) - sta_priv = (void *)sta->drv_priv; - - if (sta_priv && sta_priv->asleep && - (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) { - /* - * This sends an asynchronous command to the device, - * but we can rely on it being processed before the - * next frame is processed -- and the next frame to - * this station is the one that will consume this - * counter. - * For now set the counter to just 1 since we do not - * support uAPSD yet. - */ - iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1); - } - - /* - * Send this frame after DTIM -- there's a special queue - * reserved for this for contexts that support AP mode. - */ - if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { - txq_id = ctx->mcast_queue; - /* - * The microcode will clear the more data - * bit in the last frame it transmits. 
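One detail worth spelling out in the QoS branch just below: the driver stores the per-TID sequence counter already shifted into the 802.11 sequence-number field (IEEE80211_SCTL_SEQ is 0xFFF0, the low nibble being the fragment number), which is why the counter advances by 0x10 per frame. For example, a stored value of 0x0150 corresponds to sequence number 21; after this frame it becomes 0x0160, i.e. 22.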
- */ - hdr->frame_control |= - cpu_to_le16(IEEE80211_FCTL_MOREDATA); - } else - txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; - - /* irqs already disabled/saved above when locking priv->lock */ - spin_lock(&priv->sta_lock); - - if (ieee80211_is_data_qos(fc)) { - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { - spin_unlock(&priv->sta_lock); - goto drop_unlock; - } - seq_number = priv->stations[sta_id].tid[tid].seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl = hdr->seq_ctrl & - cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); - seq_number += 0x10; - /* aggregation is on for this <sta,tid> */ - if (info->flags & IEEE80211_TX_CTL_AMPDU && - priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { - txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; - is_agg = true; - } - } - - txq = &priv->txq[txq_id]; - q = &txq->q; - - if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) { - spin_unlock(&priv->sta_lock); - goto drop_unlock; - } - - if (ieee80211_is_data_qos(fc)) { - priv->stations[sta_id].tid[tid].tfds_in_queue++; - if (!ieee80211_has_morefrags(fc)) - priv->stations[sta_id].tid[tid].seq_number = seq_number; - } - - spin_unlock(&priv->sta_lock); - - /* Set up driver data for this TFD */ - memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); - txq->txb[q->write_ptr].skb = skb; - txq->txb[q->write_ptr].ctx = ctx; - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_cmd = txq->cmd[q->write_ptr]; - out_meta = &txq->meta[q->write_ptr]; - tx_cmd = &out_cmd->cmd.tx; - memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); - memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); - - /* - * Set up the Tx-command (not MAC!) header. - * Store the chosen Tx queue and TFD index within the sequence field; - * after Tx, uCode's Tx response will return this value so driver can - * locate the frame within the tx queue and do post-tx processing. - */ - out_cmd->hdr.cmd = REPLY_TX; - out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(q->write_ptr))); - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - - - /* Total # bytes to be transmitted */ - len = (u16)skb->len; - tx_cmd->len = cpu_to_le16(len); - - if (info->control.hw_key) - iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); - - /* TODO need this for burst mode later on */ - iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); - iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); - - iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc); - - iwl_legacy_update_stats(priv, true, fc, len); - /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. - */ - len = sizeof(struct iwl_tx_cmd) + - sizeof(struct iwl_cmd_header) + hdr_len; - firstlen = (len + 3) & ~3; - - /* Tell NIC about any 2-byte padding after MAC header */ - if (firstlen != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. 
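The firstlen arithmetic computed above is a plain round-up to the next dword: (len + 3) & ~3. With an invented len of 62 bytes this gives firstlen = 64, the two lengths differ, and TX_CMD_FLG_MH_PAD_MSK tells the device that two pad bytes follow the MAC header; with len = 60 the value is unchanged and no flag is set.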
*/ - txcmd_phys = pci_map_single(priv->pci_dev, - &out_cmd->hdr, firstlen, - PCI_DMA_BIDIRECTIONAL); - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, firstlen); - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - txcmd_phys, firstlen, 1, 0); - - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). */ - secondlen = skb->len - hdr_len; - if (secondlen > 0) { - phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, - secondlen, PCI_DMA_TODEVICE); - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - phys_addr, secondlen, - 0, 0); - } - - scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); - - /* take back ownership of DMA buffer to enable update */ - pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, - firstlen, PCI_DMA_BIDIRECTIONAL); - tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); - tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys); - - IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", - le16_to_cpu(out_cmd->hdr.sequence)); - IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); - - /* Set up entry for this TFD in Tx byte-count array */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) - priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, - le16_to_cpu(tx_cmd->len)); - - pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, - firstlen, PCI_DMA_BIDIRECTIONAL); - - trace_iwlwifi_legacy_dev_tx(priv, - &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], - sizeof(struct iwl_tfd), - &out_cmd->hdr, firstlen, - skb->data + hdr_len, secondlen); - - /* Tell device the write index *just past* this latest filled TFD */ - q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); - iwl_legacy_txq_update_write_ptr(priv, txq); - spin_unlock_irqrestore(&priv->lock, flags); - - /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually, - * regardless of the value of ret. "ret" only indicates - * whether or not we should update the write pointer. - */ - - /* - * Avoid atomic ops if it isn't an associated client. - * Also, if this is a packet for aggregation, don't - * increase the counter because the ucode will stop - * aggregation queues when their respective station - * goes to sleep. 
- */ - if (sta_priv && sta_priv->client && !is_agg) - atomic_inc(&sta_priv->pending_frames); - - if ((iwl_legacy_queue_space(q) < q->high_mark) && - priv->mac80211_registered) { - if (wait_write_ptr) { - spin_lock_irqsave(&priv->lock, flags); - txq->need_update = 1; - iwl_legacy_txq_update_write_ptr(priv, txq); - spin_unlock_irqrestore(&priv->lock, flags); - } else { - iwl_legacy_stop_queue(priv, txq); - } - } - - return 0; - -drop_unlock: - spin_unlock_irqrestore(&priv->lock, flags); - return -1; -} - -static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv, - struct iwl_dma_ptr *ptr, size_t size) -{ - ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, - GFP_KERNEL); - if (!ptr->addr) - return -ENOMEM; - ptr->size = size; - return 0; -} - -static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv, - struct iwl_dma_ptr *ptr) -{ - if (unlikely(!ptr->addr)) - return; - - dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); - memset(ptr, 0, sizeof(*ptr)); -} - -/** - * iwl4965_hw_txq_ctx_free - Free TXQ Context - * - * Destroy all TX DMA queues and structures - */ -void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv) -{ - int txq_id; - - /* Tx queues */ - if (priv->txq) { - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) - if (txq_id == priv->cmd_queue) - iwl_legacy_cmd_queue_free(priv); - else - iwl_legacy_tx_queue_free(priv, txq_id); - } - iwl4965_free_dma_ptr(priv, &priv->kw); - - iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); - - /* free tx queue structure */ - iwl_legacy_txq_mem(priv); -} - -/** - * iwl4965_txq_ctx_alloc - allocate TX queue context - * Allocate all Tx DMA structures and initialize them - * - * @param priv - * @return error code - */ -int iwl4965_txq_ctx_alloc(struct iwl_priv *priv) -{ - int ret; - int txq_id, slots_num; - unsigned long flags; - - /* Free all tx/cmd queues and keep-warm buffer */ - iwl4965_hw_txq_ctx_free(priv); - - ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls, - priv->hw_params.scd_bc_tbls_size); - if (ret) { - IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); - goto error_bc_tbls; - } - /* Alloc keep-warm buffer */ - ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); - if (ret) { - IWL_ERR(priv, "Keep Warm allocation failed\n"); - goto error_kw; - } - - /* allocate tx queue structure */ - ret = iwl_legacy_alloc_txq_mem(priv); - if (ret) - goto error; - - spin_lock_irqsave(&priv->lock, flags); - - /* Turn off all Tx DMA fifos */ - iwl4965_txq_set_sched(priv, 0); - - /* Tell NIC where to find the "keep warm" buffer */ - iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - slots_num = (txq_id == priv->cmd_queue) ? 
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_legacy_tx_queue_init(priv, - &priv->txq[txq_id], slots_num, - txq_id); - if (ret) { - IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); - goto error; - } - } - - return ret; - - error: - iwl4965_hw_txq_ctx_free(priv); - iwl4965_free_dma_ptr(priv, &priv->kw); - error_kw: - iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); - error_bc_tbls: - return ret; -} - -void iwl4965_txq_ctx_reset(struct iwl_priv *priv) -{ - int txq_id, slots_num; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - - /* Turn off all Tx DMA fifos */ - iwl4965_txq_set_sched(priv, 0); - - /* Tell NIC where to find the "keep warm" buffer */ - iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Alloc and init all Tx queues, including the command queue (#4) */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - slots_num = txq_id == priv->cmd_queue ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id], - slots_num, txq_id); - } -} - -/** - * iwl4965_txq_ctx_stop - Stop all Tx DMA channels - */ -void iwl4965_txq_ctx_stop(struct iwl_priv *priv) -{ - int ch, txq_id; - unsigned long flags; - - /* Turn off all Tx DMA fifos */ - spin_lock_irqsave(&priv->lock, flags); - - iwl4965_txq_set_sched(priv, 0); - - /* Stop each Tx DMA channel, and wait for it to be idle */ - for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { - iwl_legacy_write_direct32(priv, - FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); - if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, - FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), - 1000)) - IWL_ERR(priv, "Failing on timeout while stopping" - " DMA channel %d [0x%08x]", ch, - iwl_legacy_read_direct32(priv, - FH_TSSR_TX_STATUS_REG)); - } - spin_unlock_irqrestore(&priv->lock, flags); - - if (!priv->txq) - return; - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) - if (txq_id == priv->cmd_queue) - iwl_legacy_cmd_queue_unmap(priv); - else - iwl_legacy_tx_queue_unmap(priv, txq_id); -} - -/* - * Find first available (lowest unused) Tx Queue, mark it "active". - * Called only when finding queue for aggregation. - * Should never return anything < 7, because they should already - * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) - */ -static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) -{ - int txq_id; - - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) - if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) - return txq_id; - return -1; -} - -/** - * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration - */ -static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, - u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ - iwl_legacy_write_prph(priv, - IWL49_SCD_QUEUE_STATUS_BITS(txq_id), - (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - -/** - * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue - */ -static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, - u16 txq_id) -{ - u32 tbl_dw_addr; - u32 tbl_dw; - u16 scd_q2ratid; - - scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; - - tbl_dw_addr = priv->scd_base_addr + - IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); - - tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr); - - if (txq_id & 0x1) - tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); - else - tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); - - iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw); - - return 0; -} - -/** - * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue - * - * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE, - * i.e. it must be one of the higher queues used for aggregation - */ -static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, - int tx_fifo, int sta_id, int tid, u16 ssn_idx) -{ - unsigned long flags; - u16 ra_tid; - int ret; - - if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || - (IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IWL_WARN(priv, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWL49_FIRST_AMPDU_QUEUE, - IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - ra_tid = BUILD_RAxTID(sta_id, tid); - - /* Modify device's station table to Tx this TID */ - ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid); - if (ret) - return ret; - - spin_lock_irqsave(&priv->lock, flags); - - /* Stop this Tx queue before configuring it */ - iwl4965_tx_queue_stop_scheduler(priv, txq_id); - - /* Map receiver-address / traffic-ID to this queue */ - iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); - - /* Set this queue as a chain-building queue */ - iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); - - /* Place first TFD at index corresponding to start sequence number. 
- * Assumes that ssn_idx is valid (!= 0xFFF) */ - priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); - - /* Set up Tx window size and frame limit for this queue */ - iwl_legacy_write_targ_mem(priv, - priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), - (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & - IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); - - iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + - IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), - (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) - & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); - - iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); - - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); - - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - - -int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) -{ - int sta_id; - int tx_fifo; - int txq_id; - int ret; - unsigned long flags; - struct iwl_tid_data *tid_data; - - tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); - if (unlikely(tx_fifo < 0)) - return tx_fifo; - - IWL_WARN(priv, "%s on ra = %pM tid = %d\n", - __func__, sta->addr, tid); - - sta_id = iwl_legacy_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Start AGG on invalid station\n"); - return -ENXIO; - } - if (unlikely(tid >= MAX_TID_COUNT)) - return -EINVAL; - - if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { - IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); - return -ENXIO; - } - - txq_id = iwl4965_txq_ctx_activate_free(priv); - if (txq_id == -1) { - IWL_ERR(priv, "No free aggregation queue available\n"); - return -ENXIO; - } - - spin_lock_irqsave(&priv->sta_lock, flags); - tid_data = &priv->stations[sta_id].tid[tid]; - *ssn = SEQ_TO_SN(tid_data->seq_number); - tid_data->agg.txq_id = txq_id; - iwl_legacy_set_swq_id(&priv->txq[txq_id], - iwl4965_get_ac_from_tid(tid), txq_id); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo, - sta_id, tid, *ssn); - if (ret) - return ret; - - spin_lock_irqsave(&priv->sta_lock, flags); - tid_data = &priv->stations[sta_id].tid[tid]; - if (tid_data->tfds_in_queue == 0) { - IWL_DEBUG_HT(priv, "HW queue is empty\n"); - tid_data->agg.state = IWL_AGG_ON; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - } else { - IWL_DEBUG_HT(priv, - "HW queue is NOT empty: %d packets in HW queue\n", - tid_data->tfds_in_queue); - tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; - } - spin_unlock_irqrestore(&priv->sta_lock, flags); - return ret; -} - -/** - * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE - * priv->lock must be held by the caller - */ -static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, - u16 ssn_idx, u8 tx_fifo) -{ - if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || - (IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IWL_WARN(priv, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWL49_FIRST_AMPDU_QUEUE, - IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - iwl4965_tx_queue_stop_scheduler(priv, txq_id); - - iwl_legacy_clear_bits_prph(priv, - IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); - - 
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - /* supposes that ssn_idx is valid (!= 0xFFF) */ - iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); - - iwl_legacy_clear_bits_prph(priv, - IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); - iwl_txq_ctx_deactivate(priv, txq_id); - iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); - - return 0; -} - -int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - int tx_fifo_id, txq_id, sta_id, ssn; - struct iwl_tid_data *tid_data; - int write_ptr, read_ptr; - unsigned long flags; - - tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); - if (unlikely(tx_fifo_id < 0)) - return tx_fifo_id; - - sta_id = iwl_legacy_sta_id(sta); - - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_irqsave(&priv->sta_lock, flags); - - tid_data = &priv->stations[sta_id].tid[tid]; - ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; - txq_id = tid_data->agg.txq_id; - - switch (priv->stations[sta_id].tid[tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* - * This can happen if the peer stops aggregation - * again before we've had a chance to drain the - * queue we selected previously, i.e. before the - * session was really started completely. - */ - IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); - goto turn_off; - case IWL_AGG_ON: - break; - default: - IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); - } - - write_ptr = priv->txq[txq_id].q.write_ptr; - read_ptr = priv->txq[txq_id].q.read_ptr; - - /* The queue is not empty */ - if (write_ptr != read_ptr) { - IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); - priv->stations[sta_id].tid[tid].agg.state = - IWL_EMPTYING_HW_QUEUE_DELBA; - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - - IWL_DEBUG_HT(priv, "HW queue is empty\n"); - turn_off: - priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; - - /* do not restore/save irqs */ - spin_unlock(&priv->sta_lock); - spin_lock(&priv->lock); - - /* - * the only reason this call can fail is queue number out of range, - * which can happen if uCode is reloaded and all the station - * information are lost. if it is outside the range, there is no need - * to deactivate the uCode queue, just return "success" to allow - * mac80211 to clean up it own data. 
- */ - iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id); - spin_unlock_irqrestore(&priv->lock, flags); - - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - - return 0; -} - -int iwl4965_txq_check_empty(struct iwl_priv *priv, - int sta_id, u8 tid, int txq_id) -{ - struct iwl_queue *q = &priv->txq[txq_id].q; - u8 *addr = priv->stations[sta_id].sta.sta.addr; - struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; - struct iwl_rxon_context *ctx; - - ctx = &priv->contexts[priv->stations[sta_id].ctxid]; - - lockdep_assert_held(&priv->sta_lock); - - switch (priv->stations[sta_id].tid[tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_DELBA: - /* We are reclaiming the last packet of the */ - /* aggregated HW queue */ - if ((txq_id == tid_data->agg.txq_id) && - (q->read_ptr == q->write_ptr)) { - u16 ssn = SEQ_TO_SN(tid_data->seq_number); - int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid); - IWL_DEBUG_HT(priv, - "HW queue empty: continue DELBA flow\n"); - iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo); - tid_data->agg.state = IWL_AGG_OFF; - ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); - } - break; - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* We are reclaiming the last packet of the queue */ - if (tid_data->tfds_in_queue == 0) { - IWL_DEBUG_HT(priv, - "HW queue empty: continue ADDBA flow\n"); - tid_data->agg.state = IWL_AGG_ON; - ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); - } - break; - } - - return 0; -} - -static void iwl4965_non_agg_tx_status(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr1) -{ - struct ieee80211_sta *sta; - struct iwl_station_priv *sta_priv; - - rcu_read_lock(); - sta = ieee80211_find_sta(ctx->vif, addr1); - if (sta) { - sta_priv = (void *)sta->drv_priv; - /* avoid atomic ops if this isn't a client */ - if (sta_priv->client && - atomic_dec_return(&sta_priv->pending_frames) == 0) - ieee80211_sta_block_awake(priv->hw, sta, false); - } - rcu_read_unlock(); -} - -static void -iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info, - bool is_agg) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data; - - if (!is_agg) - iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1); - - ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); -} - -int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct iwl_queue *q = &txq->q; - struct iwl_tx_info *tx_info; - int nfreed = 0; - struct ieee80211_hdr *hdr; - - if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " - "is out of range [0-%d] %d %d.\n", txq_id, - index, q->n_bd, q->write_ptr, q->read_ptr); - return 0; - } - - for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); - q->read_ptr != index; - q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { - - tx_info = &txq->txb[txq->q.read_ptr]; - - if (WARN_ON_ONCE(tx_info->skb == NULL)) - continue; - - hdr = (struct ieee80211_hdr *)tx_info->skb->data; - if (ieee80211_is_data_qos(hdr->frame_control)) - nfreed++; - - iwl4965_tx_status(priv, tx_info, - txq_id >= IWL4965_FIRST_AMPDU_QUEUE); - tx_info->skb = NULL; - - priv->cfg->ops->lib->txq_free_tfd(priv, txq); - } - return nfreed; -} - -/** - * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack - * - * Go through block-ack's bitmap of ACK'd frames, update driver's record of - * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 
- */ -static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_ht_agg *agg, - struct iwl_compressed_ba_resp *ba_resp) - -{ - int i, sh, ack; - u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); - u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); - int successes = 0; - struct ieee80211_tx_info *info; - u64 bitmap, sent_bitmap; - - if (unlikely(!agg->wait_for_ba)) { - if (unlikely(ba_resp->bitmap)) - IWL_ERR(priv, "Received BA when not expected\n"); - return -EINVAL; - } - - /* Mark that the expected block-ack response arrived */ - agg->wait_for_ba = 0; - IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, - ba_resp->seq_ctl); - - /* Calculate shift to align block-ack bits with our Tx window bits */ - sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); - if (sh < 0) /* tbw something is wrong with indices */ - sh += 0x100; - - if (agg->frame_count > (64 - sh)) { - IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); - return -1; - } - - /* don't use 64-bit values for now */ - bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; - - /* check for success or failure according to the - * transmitted bitmap and block-ack bitmap */ - sent_bitmap = bitmap & agg->bitmap; - - /* For each frame attempted in aggregation, - * update driver's record of tx frame's status. */ - i = 0; - while (sent_bitmap) { - ack = sent_bitmap & 1ULL; - successes += ack; - IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", - ack ? "ACK" : "NACK", i, - (agg->start_idx + i) & 0xff, - agg->start_idx + i); - sent_bitmap >>= 1; - ++i; - } - - IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", - (unsigned long long)bitmap); - - info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); - memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_ACK; - info->flags |= IEEE80211_TX_STAT_AMPDU; - info->status.ampdu_ack_len = successes; - info->status.ampdu_len = agg->frame_count; - iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info); - - return 0; -} - -/** - * translate ucode response to mac80211 tx status control values - */ -void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, - struct ieee80211_tx_info *info) -{ - struct ieee80211_tx_rate *r = &info->control.rates[0]; - - info->antenna_sel_tx = - ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); - if (rate_n_flags & RATE_MCS_HT_MSK) - r->flags |= IEEE80211_TX_RC_MCS; - if (rate_n_flags & RATE_MCS_GF_MSK) - r->flags |= IEEE80211_TX_RC_GREEN_FIELD; - if (rate_n_flags & RATE_MCS_HT40_MSK) - r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (rate_n_flags & RATE_MCS_DUP_MSK) - r->flags |= IEEE80211_TX_RC_DUP_DATA; - if (rate_n_flags & RATE_MCS_SGI_MSK) - r->flags |= IEEE80211_TX_RC_SHORT_GI; - r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); -} - -/** - * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA - * - * Handles block-acknowledge notification from device, which reports success - * of frames sent via aggregation. 
- */ -void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; - struct iwl_tx_queue *txq = NULL; - struct iwl_ht_agg *agg; - int index; - int sta_id; - int tid; - unsigned long flags; - - /* "flow" corresponds to Tx queue */ - u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); - - /* "ssn" is start of block-ack Tx window, corresponds to index - * (in Tx queue's circular buffer) of first TFD/frame in window */ - u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); - - if (scd_flow >= priv->hw_params.max_txq_num) { - IWL_ERR(priv, - "BUG_ON scd_flow is bigger than number of queues\n"); - return; - } - - txq = &priv->txq[scd_flow]; - sta_id = ba_resp->sta_id; - tid = ba_resp->tid; - agg = &priv->stations[sta_id].tid[tid].agg; - if (unlikely(agg->txq_id != scd_flow)) { - /* - * FIXME: this is a uCode bug which need to be addressed, - * log the information and return for now! - * since it is possible happen very often and in order - * not to fill the syslog, don't enable the logging by default - */ - IWL_DEBUG_TX_REPLY(priv, - "BA scd_flow %d does not match txq_id %d\n", - scd_flow, agg->txq_id); - return; - } - - /* Find index just before block-ack window */ - index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); - - spin_lock_irqsave(&priv->sta_lock, flags); - - IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " - "sta_id = %d\n", - agg->wait_for_ba, - (u8 *) &ba_resp->sta_addr_lo32, - ba_resp->sta_id); - IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx," - "scd_flow = " - "%d, scd_ssn = %d\n", - ba_resp->tid, - ba_resp->seq_ctl, - (unsigned long long)le64_to_cpu(ba_resp->bitmap), - ba_resp->scd_flow, - ba_resp->scd_ssn); - IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n", - agg->start_idx, - (unsigned long long)agg->bitmap); - - /* Update driver's record of ACK vs. not for each frame in window */ - iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); - - /* Release all TFDs before the SSN, i.e. all TFDs in front of - * block-ack window (we assume that they've been successfully - * transmitted ... if not, it's too late anyway). 
*/ - if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { - /* calculate mac80211 ampdu sw queue to wake */ - int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); - iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); - - if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) && - priv->mac80211_registered && - (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) - iwl_legacy_wake_queue(priv, txq); - - iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow); - } - - spin_unlock_irqrestore(&priv->sta_lock, flags); -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -const char *iwl4965_get_tx_fail_reason(u32 status) -{ -#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x -#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x - - switch (status & TX_STATUS_MSK) { - case TX_STATUS_SUCCESS: - return "SUCCESS"; - TX_STATUS_POSTPONE(DELAY); - TX_STATUS_POSTPONE(FEW_BYTES); - TX_STATUS_POSTPONE(QUIET_PERIOD); - TX_STATUS_POSTPONE(CALC_TTAK); - TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); - TX_STATUS_FAIL(SHORT_LIMIT); - TX_STATUS_FAIL(LONG_LIMIT); - TX_STATUS_FAIL(FIFO_UNDERRUN); - TX_STATUS_FAIL(DRAIN_FLOW); - TX_STATUS_FAIL(RFKILL_FLUSH); - TX_STATUS_FAIL(LIFE_EXPIRE); - TX_STATUS_FAIL(DEST_PS); - TX_STATUS_FAIL(HOST_ABORTED); - TX_STATUS_FAIL(BT_RETRY); - TX_STATUS_FAIL(STA_INVALID); - TX_STATUS_FAIL(FRAG_DROPPED); - TX_STATUS_FAIL(TID_DISABLE); - TX_STATUS_FAIL(FIFO_FLUSHED); - TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); - TX_STATUS_FAIL(PASSIVE_NO_RX); - TX_STATUS_FAIL(NO_BEACON_ON_RADAR); - } - - return "UNKNOWN"; - -#undef TX_STATUS_FAIL -#undef TX_STATUS_POSTPONE -} -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c deleted file mode 100644 index 001d148feb9..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c +++ /dev/null @@ -1,166 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/sched.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -#include "iwl-4965-hw.h" -#include "iwl-4965.h" -#include "iwl-4965-calib.h" - -#define IWL_AC_UNSET -1 - -/** - * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, - * using sample data 100 bytes apart. 
If these sample points are good, - * it's a pretty good bet that everything between them is good, too. - */ -static int -iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) -{ - u32 val; - int ret = 0; - u32 errcnt = 0; - u32 i; - - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - - for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, - i + IWL4965_RTC_INST_LOWER_BOUND); - val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - ret = -EIO; - errcnt++; - if (errcnt >= 3) - break; - } - } - - return ret; -} - -/** - * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host, - * looking at all data. - */ -static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image, - u32 len) -{ - u32 val; - u32 save_len = len; - int ret = 0; - u32 errcnt; - - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - - iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, - IWL4965_RTC_INST_LOWER_BOUND); - - errcnt = 0; - for (; len > 0; len -= sizeof(u32), image++) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - save_len - len, val, le32_to_cpu(*image)); - ret = -EIO; - errcnt++; - if (errcnt >= 20) - break; - } - } - - if (!errcnt) - IWL_DEBUG_INFO(priv, - "ucode image in INSTRUCTION memory is good\n"); - - return ret; -} - -/** - * iwl4965_verify_ucode - determine which instruction image is in SRAM, - * and verify its contents - */ -int iwl4965_verify_ucode(struct iwl_priv *priv) -{ - __le32 *image; - u32 len; - int ret; - - /* Try bootstrap */ - image = (__le32 *)priv->ucode_boot.v_addr; - len = priv->ucode_boot.len; - ret = iwl4965_verify_inst_sparse(priv, image, len); - if (!ret) { - IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); - return 0; - } - - /* Try initialize */ - image = (__le32 *)priv->ucode_init.v_addr; - len = priv->ucode_init.len; - ret = iwl4965_verify_inst_sparse(priv, image, len); - if (!ret) { - IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); - return 0; - } - - /* Try runtime/protocol */ - image = (__le32 *)priv->ucode_code.v_addr; - len = priv->ucode_code.len; - ret = iwl4965_verify_inst_sparse(priv, image, len); - if (!ret) { - IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); - return 0; - } - - IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); - - /* Since nothing seems to match, show first several data entries in - * instruction SRAM, so maybe visual inspection will give a clue. - * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ - image = (__le32 *)priv->ucode_boot.v_addr; - len = priv->ucode_boot.len; - ret = iwl4965_verify_inst_full(priv, image, len); - - return ret; -} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c deleted file mode 100644 index 86f4fce193e..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965.c +++ /dev/null @@ -1,2183 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -#include "iwl-4965-calib.h" -#include "iwl-sta.h" -#include "iwl-4965-led.h" -#include "iwl-4965.h" -#include "iwl-4965-debugfs.h" - -static int iwl4965_send_tx_power(struct iwl_priv *priv); -static int iwl4965_hw_get_temperature(struct iwl_priv *priv); - -/* Highest firmware API version supported */ -#define IWL4965_UCODE_API_MAX 2 - -/* Lowest firmware API version supported */ -#define IWL4965_UCODE_API_MIN 2 - -#define IWL4965_FW_PRE "iwlwifi-4965-" -#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" -#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) - -/* check contents of special bootstrap uCode SRAM */ -static int iwl4965_verify_bsm(struct iwl_priv *priv) -{ - __le32 *image = priv->ucode_boot.v_addr; - u32 len = priv->ucode_boot.len; - u32 reg; - u32 val; - - IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); - - /* verify BSM SRAM contents */ - val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); - for (reg = BSM_SRAM_LOWER_BOUND; - reg < BSM_SRAM_LOWER_BOUND + len; - reg += sizeof(u32), image++) { - val = iwl_legacy_read_prph(priv, reg); - if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "BSM uCode verification failed at " - "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", - BSM_SRAM_LOWER_BOUND, - reg - BSM_SRAM_LOWER_BOUND, len, - val, le32_to_cpu(*image)); - return -EIO; - } - } - - IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); - - return 0; -} - -/** - * iwl4965_load_bsm - Load bootstrap instructions - * - * BSM operation: - * - * The Bootstrap State Machine (BSM) stores a short 
bootstrap uCode program - * in special SRAM that does not power down during RFKILL. When powering back - * up after power-saving sleeps (or during initial uCode load), the BSM loads - * the bootstrap program into the on-board processor, and starts it. - * - * The bootstrap program loads (via DMA) instructions and data for a new - * program from host DRAM locations indicated by the host driver in the - * BSM_DRAM_* registers. Once the new program is loaded, it starts - * automatically. - * - * When initializing the NIC, the host driver points the BSM to the - * "initialize" uCode image. This uCode sets up some internal data, then - * notifies host via "initialize alive" that it is complete. - * - * The host then replaces the BSM_DRAM_* pointer values to point to the - * normal runtime uCode instructions and a backup uCode data cache buffer - * (filled initially with starting data values for the on-board processor), - * then triggers the "initialize" uCode to load and launch the runtime uCode, - * which begins normal operation. - * - * When doing a power-save shutdown, runtime uCode saves data SRAM into - * the backup data cache in DRAM before SRAM is powered down. - * - * When powering back up, the BSM loads the bootstrap program. This reloads - * the runtime uCode instructions and the backup data cache into SRAM, - * and re-launches the runtime uCode from where it left off. - */ -static int iwl4965_load_bsm(struct iwl_priv *priv) -{ - __le32 *image = priv->ucode_boot.v_addr; - u32 len = priv->ucode_boot.len; - dma_addr_t pinst; - dma_addr_t pdata; - u32 inst_len; - u32 data_len; - int i; - u32 done; - u32 reg_offset; - int ret; - - IWL_DEBUG_INFO(priv, "Begin load bsm\n"); - - priv->ucode_type = UCODE_RT; - - /* make sure bootstrap program is no larger than BSM's SRAM size */ - if (len > IWL49_MAX_BSM_SIZE) - return -EINVAL; - - /* Tell bootstrap uCode where to find the "Initialize" uCode - * in host DRAM ... host DRAM physical address bits 35:4 for 4965. - * NOTE: iwl_init_alive_start() will replace these values, - * after the "initialize" uCode has run, to point to - * runtime/protocol instructions and backup data cache. 
- */ - pinst = priv->ucode_init.p_addr >> 4; - pdata = priv->ucode_init_data.p_addr >> 4; - inst_len = priv->ucode_init.len; - data_len = priv->ucode_init_data.len; - - iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); - - /* Fill BSM memory with bootstrap instructions */ - for (reg_offset = BSM_SRAM_LOWER_BOUND; - reg_offset < BSM_SRAM_LOWER_BOUND + len; - reg_offset += sizeof(u32), image++) - _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image)); - - ret = iwl4965_verify_bsm(priv); - if (ret) - return ret; - - /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ - iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); - iwl_legacy_write_prph(priv, - BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); - iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); - - /* Load bootstrap code into instruction SRAM now, - * to prepare to load "initialize" uCode */ - iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); - - /* Wait for load of bootstrap uCode to finish */ - for (i = 0; i < 100; i++) { - done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); - if (!(done & BSM_WR_CTRL_REG_BIT_START)) - break; - udelay(10); - } - if (i < 100) - IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); - else { - IWL_ERR(priv, "BSM write did not complete!\n"); - return -EIO; - } - - /* Enable future boot loads whenever power management unit triggers it - * (e.g. when powering back up after power-save shutdown) */ - iwl_legacy_write_prph(priv, - BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); - - - return 0; -} - -/** - * iwl4965_set_ucode_ptrs - Set uCode address location - * - * Tell initialization uCode where to find runtime uCode. - * - * BSM registers initially contain pointers to initialization uCode. - * We need to replace them to load runtime uCode inst and data, - * and to save runtime data when powering down. - */ -static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv) -{ - dma_addr_t pinst; - dma_addr_t pdata; - int ret = 0; - - /* bits 35:4 for 4965 */ - pinst = priv->ucode_code.p_addr >> 4; - pdata = priv->ucode_data_backup.p_addr >> 4; - - /* Tell bootstrap uCode where to find image to load */ - iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, - priv->ucode_data.len); - - /* Inst byte count must be last to set up, bit 31 signals uCode - * that all new ptr/size info is in place */ - iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, - priv->ucode_code.len | BSM_DRAM_INST_LOAD); - IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); - - return ret; -} - -/** - * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received - * - * Called after REPLY_ALIVE notification received from "initialize" uCode. - * - * The 4965 "initialize" ALIVE reply contains calibration data for: - * Voltage, temperature, and MIMO tx gain correction, now stored in priv - * (3945 does not contain this data). - * - * Tell "initialize" uCode to go ahead and load the runtime uCode. -*/ -static void iwl4965_init_alive_start(struct iwl_priv *priv) -{ - /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 
- * This is a paranoid check, because we would not have gotten the - * "initialize" alive if code weren't properly loaded. */ - if (iwl4965_verify_ucode(priv)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); - goto restart; - } - - /* Calculate temperature */ - priv->temperature = iwl4965_hw_get_temperature(priv); - - /* Send pointers to protocol/runtime uCode image ... init code will - * load and launch runtime uCode, which will send us another "Alive" - * notification. */ - IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); - if (iwl4965_set_ucode_ptrs(priv)) { - /* Runtime instruction load won't happen; - * take it all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); - goto restart; - } - return; - -restart: - queue_work(priv->workqueue, &priv->restart); -} - -static bool iw4965_is_ht40_channel(__le32 rxon_flags) -{ - int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) - >> RXON_FLG_CHANNEL_MODE_POS; - return ((chan_mod == CHANNEL_MODE_PURE_40) || - (chan_mod == CHANNEL_MODE_MIXED)); -} - -static void iwl4965_nic_config(struct iwl_priv *priv) -{ - unsigned long flags; - u16 radio_cfg; - - spin_lock_irqsave(&priv->lock, flags); - - radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - - /* write radio config values to register */ - if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | - EEPROM_RF_CFG_STEP_MSK(radio_cfg) | - EEPROM_RF_CFG_DASH_MSK(radio_cfg)); - - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); - - priv->calib_info = (struct iwl_eeprom_calib_info *) - iwl_legacy_eeprom_query_addr(priv, - EEPROM_4965_CALIB_TXPOWER_OFFSET); - - spin_unlock_irqrestore(&priv->lock, flags); -} - -/* Reset differential Rx gains in NIC to prepare for chain noise calibration. - * Called after every association, but this runs only once! - * ... once chain noise is calibrated the first time, it's good forever. 
*/ -static void iwl4965_chain_noise_reset(struct iwl_priv *priv) -{ - struct iwl_chain_noise_data *data = &(priv->chain_noise_data); - - if ((data->state == IWL_CHAIN_NOISE_ALIVE) && - iwl_legacy_is_any_associated(priv)) { - struct iwl_calib_diff_gain_cmd cmd; - - /* clear data for chain noise calibration algorithm */ - data->chain_noise_a = 0; - data->chain_noise_b = 0; - data->chain_noise_c = 0; - data->chain_signal_a = 0; - data->chain_signal_b = 0; - data->chain_signal_c = 0; - data->beacon_count = 0; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; - cmd.diff_gain_a = 0; - cmd.diff_gain_b = 0; - cmd.diff_gain_c = 0; - if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, - sizeof(cmd), &cmd)) - IWL_ERR(priv, - "Could not send REPLY_PHY_CALIBRATION_CMD\n"); - data->state = IWL_CHAIN_NOISE_ACCUMULATE; - IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); - } -} - -static struct iwl_sensitivity_ranges iwl4965_sensitivity = { - .min_nrg_cck = 97, - .max_nrg_cck = 0, /* not used, set to 0 */ - - .auto_corr_min_ofdm = 85, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 220, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - .auto_corr_max_ofdm_x1 = 140, - .auto_corr_max_ofdm_mrc_x1 = 270, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 200, - .auto_corr_max_cck_mrc = 400, - - .nrg_th_cck = 100, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static void iwl4965_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Kelvin */ - priv->hw_params.ct_kill_threshold = - CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY); -} - -/** - * iwl4965_hw_set_hw_params - * - * Called when initializing driver - */ -static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) -{ - if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && - priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES) - priv->cfg->base_params->num_of_queues = - priv->cfg->mod_params->num_of_queues; - - priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; - priv->hw_params.scd_bc_tbls_size = - priv->cfg->base_params->num_of_queues * - sizeof(struct iwl4965_scd_bc_tbl); - priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWL4965_STATION_COUNT; - priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID; - priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; - priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE; - priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; - priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); - - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; - - priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant); - priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant); - priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; - priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; - - iwl4965_set_ct_threshold(priv); - - priv->hw_params.sens = &iwl4965_sensitivity; - priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS; - - return 0; -} - -static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) -{ - s32 sign = 1; - - if (num < 0) { - sign = -sign; - num = -num; - } - if (denom < 0) { - sign = -sign; - denom = -denom; - } - *res = 1; - *res = ((num * 2 + denom) / (denom * 2)) * sign; - - return 1; -} - -/** - * 
iwl4965_get_voltage_compensation - Power supply voltage comp for txpower - * - * Determines power supply voltage compensation for txpower calculations. - * Returns number of 1/2-dB steps to subtract from gain table index, - * to compensate for difference between power supply voltage during - * factory measurements, vs. current power supply voltage. - * - * Voltage indication is higher for lower voltage. - * Lower voltage requires more gain (lower gain table index). - */ -static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, - s32 current_voltage) -{ - s32 comp = 0; - - if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || - (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) - return 0; - - iwl4965_math_div_round(current_voltage - eeprom_voltage, - TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); - - if (current_voltage > eeprom_voltage) - comp *= 2; - if ((comp < -2) || (comp > 2)) - comp = 0; - - return comp; -} - -static s32 iwl4965_get_tx_atten_grp(u16 channel) -{ - if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && - channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) - return CALIB_CH_GROUP_5; - - if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && - channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) - return CALIB_CH_GROUP_1; - - if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && - channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) - return CALIB_CH_GROUP_2; - - if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && - channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) - return CALIB_CH_GROUP_3; - - if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && - channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) - return CALIB_CH_GROUP_4; - - return -EINVAL; -} - -static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) -{ - s32 b = -1; - - for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { - if (priv->calib_info->band_info[b].ch_from == 0) - continue; - - if ((channel >= priv->calib_info->band_info[b].ch_from) - && (channel <= priv->calib_info->band_info[b].ch_to)) - break; - } - - return b; -} - -static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) -{ - s32 val; - - if (x2 == x1) - return y1; - else { - iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); - return val + y2; - } -} - -/** - * iwl4965_interpolate_chan - Interpolate factory measurements for one channel - * - * Interpolates factory measurements from the two sample channels within a - * sub-band, to apply to channel of interest. Interpolation is proportional to - * differences in channel frequencies, which is proportional to differences - * in channel number. - */ -static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, - struct iwl_eeprom_calib_ch_info *chan_info) -{ - s32 s = -1; - u32 c; - u32 m; - const struct iwl_eeprom_calib_measure *m1; - const struct iwl_eeprom_calib_measure *m2; - struct iwl_eeprom_calib_measure *omeas; - u32 ch_i1; - u32 ch_i2; - - s = iwl4965_get_sub_band(priv, channel); - if (s >= EEPROM_TX_POWER_BANDS) { - IWL_ERR(priv, "Tx Power can not find channel %d\n", channel); - return -1; - } - - ch_i1 = priv->calib_info->band_info[s].ch1.ch_num; - ch_i2 = priv->calib_info->band_info[s].ch2.ch_num; - chan_info->ch_num = (u8) channel; - - IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n", - channel, s, ch_i1, ch_i2); - - for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { - for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { - m1 = &(priv->calib_info->band_info[s].ch1. - measurements[c][m]); - m2 = &(priv->calib_info->band_info[s].ch2. 
- measurements[c][m]); - omeas = &(chan_info->measurements[c][m]); - - omeas->actual_pow = - (u8) iwl4965_interpolate_value(channel, ch_i1, - m1->actual_pow, - ch_i2, - m2->actual_pow); - omeas->gain_idx = - (u8) iwl4965_interpolate_value(channel, ch_i1, - m1->gain_idx, ch_i2, - m2->gain_idx); - omeas->temperature = - (u8) iwl4965_interpolate_value(channel, ch_i1, - m1->temperature, - ch_i2, - m2->temperature); - omeas->pa_det = - (s8) iwl4965_interpolate_value(channel, ch_i1, - m1->pa_det, ch_i2, - m2->pa_det); - - IWL_DEBUG_TXPOWER(priv, - "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, - m1->actual_pow, m2->actual_pow, omeas->actual_pow); - IWL_DEBUG_TXPOWER(priv, - "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, - m1->gain_idx, m2->gain_idx, omeas->gain_idx); - IWL_DEBUG_TXPOWER(priv, - "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, - m1->pa_det, m2->pa_det, omeas->pa_det); - IWL_DEBUG_TXPOWER(priv, - "chain %d meas %d T1=%d T2=%d T=%d\n", c, m, - m1->temperature, m2->temperature, - omeas->temperature); - } - } - - return 0; -} - -/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, - * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ -static s32 back_off_table[] = { - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ - 10 /* CCK */ -}; - -/* Thermal compensation values for txpower for various frequency ranges ... - * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ -static struct iwl4965_txpower_comp_entry { - s32 degrees_per_05db_a; - s32 degrees_per_05db_a_denom; -} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { - {9, 2}, /* group 0 5.2, ch 34-43 */ - {4, 1}, /* group 1 5.2, ch 44-70 */ - {4, 1}, /* group 2 5.2, ch 71-124 */ - {4, 1}, /* group 3 5.2, ch 125-200 */ - {3, 1} /* group 4 2.4, ch all */ -}; - -static s32 get_min_power_index(s32 rate_power_index, u32 band) -{ - if (!band) { - if ((rate_power_index & 7) <= 4) - return MIN_TX_GAIN_INDEX_52GHZ_EXT; - } - return MIN_TX_GAIN_INDEX; -} - -struct gain_entry { - u8 dsp; - u8 radio; -}; - -static const struct gain_entry gain_table[2][108] = { - /* 5.2GHz power gain index table */ - { - {123, 0x3F}, /* highest txpower */ - {117, 0x3F}, - {110, 0x3F}, - {104, 0x3F}, - {98, 0x3F}, - {110, 0x3E}, - {104, 0x3E}, - {98, 0x3E}, - {110, 0x3D}, - {104, 0x3D}, - {98, 0x3D}, - {110, 0x3C}, - {104, 0x3C}, - {98, 0x3C}, - {110, 0x3B}, - {104, 0x3B}, - {98, 0x3B}, - {110, 0x3A}, - {104, 0x3A}, - {98, 0x3A}, - {110, 0x39}, - {104, 0x39}, - {98, 0x39}, - {110, 0x38}, - {104, 0x38}, - {98, 0x38}, - {110, 0x37}, - {104, 0x37}, - {98, 0x37}, - {110, 0x36}, - {104, 0x36}, - {98, 0x36}, - {110, 0x35}, - {104, 0x35}, - {98, 0x35}, - {110, 0x34}, - {104, 0x34}, - {98, 0x34}, - {110, 0x33}, - {104, 0x33}, - {98, 0x33}, - {110, 0x32}, - {104, 0x32}, - {98, 0x32}, - {110, 0x31}, - {104, 0x31}, - {98, 0x31}, - {110, 0x30}, - {104, 0x30}, - {98, 0x30}, - {110, 0x25}, - {104, 0x25}, - {98, 0x25}, - {110, 0x24}, - {104, 0x24}, - {98, 0x24}, - {110, 0x23}, - {104, 0x23}, - {98, 0x23}, - {110, 0x22}, - {104, 0x18}, - {98, 0x18}, - {110, 0x17}, - {104, 0x17}, - {98, 0x17}, - {110, 0x16}, - {104, 0x16}, - {98, 0x16}, - {110, 0x15}, - {104, 0x15}, - {98, 0x15}, - {110, 0x14}, - {104, 0x14}, - {98, 0x14}, - {110, 0x13}, - {104, 0x13}, - {98, 0x13}, - {110, 0x12}, - {104, 0x08}, - {98, 0x08}, - {110, 0x07}, - {104, 0x07}, - {98, 0x07}, - 
{110, 0x06}, - {104, 0x06}, - {98, 0x06}, - {110, 0x05}, - {104, 0x05}, - {98, 0x05}, - {110, 0x04}, - {104, 0x04}, - {98, 0x04}, - {110, 0x03}, - {104, 0x03}, - {98, 0x03}, - {110, 0x02}, - {104, 0x02}, - {98, 0x02}, - {110, 0x01}, - {104, 0x01}, - {98, 0x01}, - {110, 0x00}, - {104, 0x00}, - {98, 0x00}, - {93, 0x00}, - {88, 0x00}, - {83, 0x00}, - {78, 0x00}, - }, - /* 2.4GHz power gain index table */ - { - {110, 0x3f}, /* highest txpower */ - {104, 0x3f}, - {98, 0x3f}, - {110, 0x3e}, - {104, 0x3e}, - {98, 0x3e}, - {110, 0x3d}, - {104, 0x3d}, - {98, 0x3d}, - {110, 0x3c}, - {104, 0x3c}, - {98, 0x3c}, - {110, 0x3b}, - {104, 0x3b}, - {98, 0x3b}, - {110, 0x3a}, - {104, 0x3a}, - {98, 0x3a}, - {110, 0x39}, - {104, 0x39}, - {98, 0x39}, - {110, 0x38}, - {104, 0x38}, - {98, 0x38}, - {110, 0x37}, - {104, 0x37}, - {98, 0x37}, - {110, 0x36}, - {104, 0x36}, - {98, 0x36}, - {110, 0x35}, - {104, 0x35}, - {98, 0x35}, - {110, 0x34}, - {104, 0x34}, - {98, 0x34}, - {110, 0x33}, - {104, 0x33}, - {98, 0x33}, - {110, 0x32}, - {104, 0x32}, - {98, 0x32}, - {110, 0x31}, - {104, 0x31}, - {98, 0x31}, - {110, 0x30}, - {104, 0x30}, - {98, 0x30}, - {110, 0x6}, - {104, 0x6}, - {98, 0x6}, - {110, 0x5}, - {104, 0x5}, - {98, 0x5}, - {110, 0x4}, - {104, 0x4}, - {98, 0x4}, - {110, 0x3}, - {104, 0x3}, - {98, 0x3}, - {110, 0x2}, - {104, 0x2}, - {98, 0x2}, - {110, 0x1}, - {104, 0x1}, - {98, 0x1}, - {110, 0x0}, - {104, 0x0}, - {98, 0x0}, - {97, 0}, - {96, 0}, - {95, 0}, - {94, 0}, - {93, 0}, - {92, 0}, - {91, 0}, - {90, 0}, - {89, 0}, - {88, 0}, - {87, 0}, - {86, 0}, - {85, 0}, - {84, 0}, - {83, 0}, - {82, 0}, - {81, 0}, - {80, 0}, - {79, 0}, - {78, 0}, - {77, 0}, - {76, 0}, - {75, 0}, - {74, 0}, - {73, 0}, - {72, 0}, - {71, 0}, - {70, 0}, - {69, 0}, - {68, 0}, - {67, 0}, - {66, 0}, - {65, 0}, - {64, 0}, - {63, 0}, - {62, 0}, - {61, 0}, - {60, 0}, - {59, 0}, - } -}; - -static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, - u8 is_ht40, u8 ctrl_chan_high, - struct iwl4965_tx_power_db *tx_power_tbl) -{ - u8 saturation_power; - s32 target_power; - s32 user_target_power; - s32 power_limit; - s32 current_temp; - s32 reg_limit; - s32 current_regulatory; - s32 txatten_grp = CALIB_CH_GROUP_MAX; - int i; - int c; - const struct iwl_channel_info *ch_info = NULL; - struct iwl_eeprom_calib_ch_info ch_eeprom_info; - const struct iwl_eeprom_calib_measure *measurement; - s16 voltage; - s32 init_voltage; - s32 voltage_compensation; - s32 degrees_per_05db_num; - s32 degrees_per_05db_denom; - s32 factory_temp; - s32 temperature_comp[2]; - s32 factory_gain_index[2]; - s32 factory_actual_pwr[2]; - s32 power_index; - - /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units - * are used for indexing into txpower table) */ - user_target_power = 2 * priv->tx_power_user_lmt; - - /* Get current (RXON) channel, band, width */ - IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, - is_ht40); - - ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel); - - if (!iwl_legacy_is_channel_valid(ch_info)) - return -EINVAL; - - /* get txatten group, used to select 1) thermal txpower adjustment - * and 2) mimo txpower balance between Tx chains. 
*/ - txatten_grp = iwl4965_get_tx_atten_grp(channel); - if (txatten_grp < 0) { - IWL_ERR(priv, "Can't find txatten group for channel %d.\n", - channel); - return txatten_grp; - } - - IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n", - channel, txatten_grp); - - if (is_ht40) { - if (ctrl_chan_high) - channel -= 2; - else - channel += 2; - } - - /* hardware txpower limits ... - * saturation (clipping distortion) txpowers are in half-dBm */ - if (band) - saturation_power = priv->calib_info->saturation_power24; - else - saturation_power = priv->calib_info->saturation_power52; - - if (saturation_power < IWL_TX_POWER_SATURATION_MIN || - saturation_power > IWL_TX_POWER_SATURATION_MAX) { - if (band) - saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24; - else - saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52; - } - - /* regulatory txpower limits ... reg_limit values are in half-dBm, - * max_power_avg values are in dBm, convert * 2 */ - if (is_ht40) - reg_limit = ch_info->ht40_max_power_avg * 2; - else - reg_limit = ch_info->max_power_avg * 2; - - if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) || - (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) { - if (band) - reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24; - else - reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52; - } - - /* Interpolate txpower calibration values for this channel, - * based on factory calibration tests on spaced channels. */ - iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); - - /* calculate tx gain adjustment based on power supply voltage */ - voltage = le16_to_cpu(priv->calib_info->voltage); - init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); - voltage_compensation = - iwl4965_get_voltage_compensation(voltage, init_voltage); - - IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n", - init_voltage, - voltage, voltage_compensation); - - /* get current temperature (Celsius) */ - current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN); - current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX); - current_temp = KELVIN_TO_CELSIUS(current_temp); - - /* select thermal txpower adjustment params, based on channel group - * (same frequency group used for mimo txatten adjustment) */ - degrees_per_05db_num = - tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; - degrees_per_05db_denom = - tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; - - /* get per-chain txpower values from factory measurements */ - for (c = 0; c < 2; c++) { - measurement = &ch_eeprom_info.measurements[c][1]; - - /* txgain adjustment (in half-dB steps) based on difference - * between factory and current temperature */ - factory_temp = measurement->temperature; - iwl4965_math_div_round((current_temp - factory_temp) * - degrees_per_05db_denom, - degrees_per_05db_num, - &temperature_comp[c]); - - factory_gain_index[c] = measurement->gain_idx; - factory_actual_pwr[c] = measurement->actual_pow; - - IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c); - IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, " - "curr tmp %d, comp %d steps\n", - factory_temp, current_temp, - temperature_comp[c]); - - IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n", - factory_gain_index[c], - factory_actual_pwr[c]); - } - - /* for each of 33 bit-rates (including 1 for CCK) */ - for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) { - u8 is_mimo_rate; - union iwl4965_tx_power_dual_stream tx_power; - - /* for mimo, reduce each chain's txpower by half - * (3dB, 6 steps), so total output power is regulatory - * compliant. 
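 * (Entries with bit 3 set in the rate index, i.e. (i & 0x8), are the MIMO
 * rates; for those the regulatory limit is lowered by
 * IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION in the code below.)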
*/ - if (i & 0x8) { - current_regulatory = reg_limit - - IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; - is_mimo_rate = 1; - } else { - current_regulatory = reg_limit; - is_mimo_rate = 0; - } - - /* find txpower limit, either hardware or regulatory */ - power_limit = saturation_power - back_off_table[i]; - if (power_limit > current_regulatory) - power_limit = current_regulatory; - - /* reduce user's txpower request if necessary - * for this rate on this channel */ - target_power = user_target_power; - if (target_power > power_limit) - target_power = power_limit; - - IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n", - i, saturation_power - back_off_table[i], - current_regulatory, user_target_power, - target_power); - - /* for each of 2 Tx chains (radio transmitters) */ - for (c = 0; c < 2; c++) { - s32 atten_value; - - if (is_mimo_rate) - atten_value = - (s32)le32_to_cpu(priv->card_alive_init. - tx_atten[txatten_grp][c]); - else - atten_value = 0; - - /* calculate index; higher index means lower txpower */ - power_index = (u8) (factory_gain_index[c] - - (target_power - - factory_actual_pwr[c]) - - temperature_comp[c] - - voltage_compensation + - atten_value); - -/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n", - power_index); */ - - if (power_index < get_min_power_index(i, band)) - power_index = get_min_power_index(i, band); - - /* adjust 5 GHz index to support negative indexes */ - if (!band) - power_index += 9; - - /* CCK, rate 32, reduce txpower for CCK */ - if (i == POWER_TABLE_CCK_ENTRY) - power_index += - IWL_TX_POWER_CCK_COMPENSATION_C_STEP; - - /* stay within the table! */ - if (power_index > 107) { - IWL_WARN(priv, "txpower index %d > 107\n", - power_index); - power_index = 107; - } - if (power_index < 0) { - IWL_WARN(priv, "txpower index %d < 0\n", - power_index); - power_index = 0; - } - - /* fill txpower command for this rate/chain */ - tx_power.s.radio_tx_gain[c] = - gain_table[band][power_index].radio; - tx_power.s.dsp_predis_atten[c] = - gain_table[band][power_index].dsp; - - IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d " - "gain 0x%02x dsp %d\n", - c, atten_value, power_index, - tx_power.s.radio_tx_gain[c], - tx_power.s.dsp_predis_atten[c]); - } /* for each chain */ - - tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); - - } /* for each rate */ - - return 0; -} - -/** - * iwl4965_send_tx_power - Configure the TXPOWER level user limit - * - * Uses the active RXON for channel, band, and characteristics (ht40, high) - * The power limit is taken from priv->tx_power_user_lmt. 
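 * Refuses to run (-EAGAIN) while a hardware scan is active, fills the
 * per-rate gain table via iwl4965_fill_txpower_tbl(), and sends it to the
 * uCode as REPLY_TX_PWR_TABLE_CMD.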
- */ -static int iwl4965_send_tx_power(struct iwl_priv *priv) -{ - struct iwl4965_txpowertable_cmd cmd = { 0 }; - int ret; - u8 band = 0; - bool is_ht40 = false; - u8 ctrl_chan_high = 0; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), - "TX Power requested while scanning!\n")) - return -EAGAIN; - - band = priv->band == IEEE80211_BAND_2GHZ; - - is_ht40 = iw4965_is_ht40_channel(ctx->active.flags); - - if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) - ctrl_chan_high = 1; - - cmd.band = band; - cmd.channel = ctx->active.channel; - - ret = iwl4965_fill_txpower_tbl(priv, band, - le16_to_cpu(ctx->active.channel), - is_ht40, ctrl_chan_high, &cmd.tx_power); - if (ret) - goto out; - - ret = iwl_legacy_send_cmd_pdu(priv, - REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); - -out: - return ret; -} - -static int iwl4965_send_rxon_assoc(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int ret = 0; - struct iwl4965_rxon_assoc_cmd rxon_assoc; - const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; - const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; - - if ((rxon1->flags == rxon2->flags) && - (rxon1->filter_flags == rxon2->filter_flags) && - (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && - (rxon1->ofdm_ht_single_stream_basic_rates == - rxon2->ofdm_ht_single_stream_basic_rates) && - (rxon1->ofdm_ht_dual_stream_basic_rates == - rxon2->ofdm_ht_dual_stream_basic_rates) && - (rxon1->rx_chain == rxon2->rx_chain) && - (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { - IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); - return 0; - } - - rxon_assoc.flags = ctx->staging.flags; - rxon_assoc.filter_flags = ctx->staging.filter_flags; - rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; - rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; - rxon_assoc.reserved = 0; - rxon_assoc.ofdm_ht_single_stream_basic_rates = - ctx->staging.ofdm_ht_single_stream_basic_rates; - rxon_assoc.ofdm_ht_dual_stream_basic_rates = - ctx->staging.ofdm_ht_dual_stream_basic_rates; - rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; - - ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, - sizeof(rxon_assoc), &rxon_assoc, NULL); - - return ret; -} - -static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - /* cast away the const for active_rxon in this function */ - struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active; - int ret; - bool new_assoc = - !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); - - if (!iwl_legacy_is_alive(priv)) - return -EBUSY; - - if (!ctx->is_active) - return 0; - - /* always get timestamp with Rx frame */ - ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; - - ret = iwl_legacy_check_rxon_cmd(priv, ctx); - if (ret) { - IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); - return -EINVAL; - } - - /* - * receive commit_rxon request - * abort any previous channel switch if still in process - */ - if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) && - (priv->switch_channel != ctx->staging.channel)) { - IWL_DEBUG_11H(priv, "abort channel switch on %d\n", - le16_to_cpu(priv->switch_channel)); - iwl_legacy_chswitch_done(priv, false); - } - - /* If we don't need to send a full RXON, we can use - * iwl_rxon_assoc_cmd which is used to reconfigure filter - * and other flags for the current radio configuration. 
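 * (RXON_ASSOC carries only the flags, filter flags, basic/HT rates and the
 * RX chain selection, so the radio does not have to be retuned.)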
*/ - if (!iwl_legacy_full_rxon_required(priv, ctx)) { - ret = iwl_legacy_send_rxon_assoc(priv, ctx); - if (ret) { - IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); - return ret; - } - - memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - iwl_legacy_print_rx_config_cmd(priv, ctx); - /* - * We do not commit tx power settings while channel changing, - * do it now if tx power changed. - */ - iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); - return 0; - } - - /* If we are currently associated and the new config requires - * an RXON_ASSOC and the new config wants the associated mask enabled, - * we must clear the associated from the active configuration - * before we apply the new config */ - if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) { - IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - - ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, - sizeof(struct iwl_legacy_rxon_cmd), - active_rxon); - - /* If the mask clearing failed then we set - * active_rxon back to what it was previously */ - if (ret) { - active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; - IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); - return ret; - } - iwl_legacy_clear_ucode_stations(priv, ctx); - iwl_legacy_restore_stations(priv, ctx); - ret = iwl4965_restore_default_wep_keys(priv, ctx); - if (ret) { - IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); - return ret; - } - } - - IWL_DEBUG_INFO(priv, "Sending RXON\n" - "* with%s RXON_FILTER_ASSOC_MSK\n" - "* channel = %d\n" - "* bssid = %pM\n", - (new_assoc ? "" : "out"), - le16_to_cpu(ctx->staging.channel), - ctx->staging.bssid_addr); - - iwl_legacy_set_rxon_hwcrypto(priv, ctx, - !priv->cfg->mod_params->sw_crypto); - - /* Apply the new configuration - * RXON unassoc clears the station table in uCode so restoration of - * stations is needed after it (the RXON command) completes - */ - if (!new_assoc) { - ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, - sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); - if (ret) { - IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); - return ret; - } - IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); - memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - iwl_legacy_clear_ucode_stations(priv, ctx); - iwl_legacy_restore_stations(priv, ctx); - ret = iwl4965_restore_default_wep_keys(priv, ctx); - if (ret) { - IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); - return ret; - } - } - if (new_assoc) { - priv->start_calib = 0; - /* Apply the new configuration - * RXON assoc doesn't clear the station table in uCode, - */ - ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, - sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); - if (ret) { - IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); - return ret; - } - memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - } - iwl_legacy_print_rx_config_cmd(priv, ctx); - - iwl4965_init_sensitivity(priv); - - /* If we issue a new RXON command which required a tune then we must - * send a new TXPOWER command or we won't be able to Tx any frames */ - ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); - if (ret) { - IWL_ERR(priv, "Error sending TX power (%d)\n", ret); - return ret; - } - - return 0; -} - -static int iwl4965_hw_channel_switch(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - int rc; - u8 band = 0; - bool is_ht40 = false; - u8 ctrl_chan_high 
= 0; - struct iwl4965_channel_switch_cmd cmd; - const struct iwl_channel_info *ch_info; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - band = priv->band == IEEE80211_BAND_2GHZ; - - is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags); - - if (is_ht40 && - (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) - ctrl_chan_high = 1; - - cmd.band = band; - cmd.expect_beacon = 0; - ch = ch_switch->channel->hw_value; - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { - if (switch_count > ((priv->ucode_beacon_time - tsf_low) / - beacon_interval)) { - switch_count -= (priv->ucode_beacon_time - - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_legacy_usecs_to_beacons(priv, - switch_time_in_usec, - beacon_interval); - cmd.switch_time = iwl_legacy_add_beacon_time(priv, - priv->ucode_beacon_time, - ucode_switch_time, - beacon_interval); - } - IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd.switch_time); - ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch); - if (ch_info) - cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info); - else { - IWL_ERR(priv, "invalid channel switch from %u to %u\n", - ctx->active.channel, ch); - return -EFAULT; - } - - rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40, - ctrl_chan_high, &cmd.tx_power); - if (rc) { - IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc); - return rc; - } - - return iwl_legacy_send_cmd_pdu(priv, - REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); -} - -/** - * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - u16 byte_cnt) -{ - struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; - int txq_id = txq->q.id; - int write_ptr = txq->q.write_ptr; - int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - - WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); - - bc_ent = cpu_to_le16(len & 0xFFF); - /* Set up byte count within first 256 entries */ - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - /* If within first 64 entries, duplicate at end */ - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. 
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; -} - -/** - * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin) - * @statistics: Provides the temperature reading from the uCode - * - * A return of <0 indicates bogus data in the statistics - */ -static int iwl4965_hw_get_temperature(struct iwl_priv *priv) -{ - s32 temperature; - s32 vt; - s32 R1, R2, R3; - u32 R4; - - if (test_bit(STATUS_TEMPERATURE, &priv->status) && - (priv->_4965.statistics.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { - IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); - R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); - R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); - R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); - R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); - } else { - IWL_DEBUG_TEMP(priv, "Running temperature calibration\n"); - R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); - R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); - R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); - R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); - } - - /* - * Temperature is only 23 bits, so sign extend out to 32. - * - * NOTE If we haven't received a statistics notification yet - * with an updated temperature, use R4 provided to us in the - * "initialize" ALIVE response. - */ - if (!test_bit(STATUS_TEMPERATURE, &priv->status)) - vt = sign_extend32(R4, 23); - else - vt = sign_extend32(le32_to_cpu(priv->_4965.statistics. - general.common.temperature), 23); - - IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); - - if (R3 == R1) { - IWL_ERR(priv, "Calibration conflict R1 == R3\n"); - return -1; - } - - /* Calculate temperature in degrees Kelvin, adjust by 97%. - * Add offset to center the adjustment around 0 degrees Centigrade. */ - temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); - temperature /= (R3 - R1); - temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; - - IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n", - temperature, KELVIN_TO_CELSIUS(temperature)); - - return temperature; -} - -/* Adjust Txpower only if temperature variance is greater than threshold. */ -#define IWL_TEMPERATURE_THRESHOLD 3 - -/** - * iwl4965_is_temp_calib_needed - determines if new calibration is needed - * - * If the temperature changed has changed sufficiently, then a recalibration - * is needed. - * - * Assumes caller will replace priv->last_temperature once calibration - * executed. 
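 * Concretely: returns 1 only when statistics have been received and the
 * absolute difference between priv->temperature and priv->last_temperature
 * is at least IWL_TEMPERATURE_THRESHOLD.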
- */ -static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) -{ - int temp_diff; - - if (!test_bit(STATUS_STATISTICS, &priv->status)) { - IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n"); - return 0; - } - - temp_diff = priv->temperature - priv->last_temperature; - - /* get absolute value */ - if (temp_diff < 0) { - IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff); - temp_diff = -temp_diff; - } else if (temp_diff == 0) - IWL_DEBUG_POWER(priv, "Temperature unchanged\n"); - else - IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff); - - if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { - IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n"); - return 0; - } - - IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n"); - - return 1; -} - -static void iwl4965_temperature_calib(struct iwl_priv *priv) -{ - s32 temp; - - temp = iwl4965_hw_get_temperature(priv); - if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp)) - return; - - if (priv->temperature != temp) { - if (priv->temperature) - IWL_DEBUG_TEMP(priv, "Temperature changed " - "from %dC to %dC\n", - KELVIN_TO_CELSIUS(priv->temperature), - KELVIN_TO_CELSIUS(temp)); - else - IWL_DEBUG_TEMP(priv, "Temperature " - "initialized to %dC\n", - KELVIN_TO_CELSIUS(temp)); - } - - priv->temperature = temp; - set_bit(STATUS_TEMPERATURE, &priv->status); - - if (!priv->disable_tx_power_cal && - unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && - iwl4965_is_temp_calib_needed(priv)) - queue_work(priv->workqueue, &priv->txpower_work); -} - -static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) -{ - switch (cmd_id) { - case REPLY_RXON: - return (u16) sizeof(struct iwl4965_rxon_cmd); - default: - return len; - } -} - -static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, - u8 *data) -{ - struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; - addsta->mode = cmd->mode; - memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); - memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); - addsta->station_flags = cmd->station_flags; - addsta->station_flags_msk = cmd->station_flags_msk; - addsta->tid_disable_tx = cmd->tid_disable_tx; - addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; - addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; - addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; - addsta->sleep_tx_count = cmd->sleep_tx_count; - addsta->reserved1 = cpu_to_le16(0); - addsta->reserved2 = cpu_to_le16(0); - - return (u16)sizeof(struct iwl4965_addsta_cmd); -} - -static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp) -{ - return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; -} - -/** - * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue - */ -static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, - struct iwl_ht_agg *agg, - struct iwl4965_tx_resp *tx_resp, - int txq_id, u16 start_idx) -{ - u16 status; - struct agg_tx_status *frame_status = tx_resp->u.agg_status; - struct ieee80211_tx_info *info = NULL; - struct ieee80211_hdr *hdr = NULL; - u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); - int i, sh, idx; - u16 seq; - if (agg->wait_for_ba) - IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); - - agg->frame_count = tx_resp->frame_count; - agg->start_idx = start_idx; - agg->rate_n_flags = rate_n_flags; - agg->bitmap = 0; - - /* num frames attempted by Tx command */ - if (agg->frame_count == 1) { - /* Only one frame was attempted; no 
block-ack will arrive */ - status = le16_to_cpu(frame_status[0].status); - idx = start_idx; - - IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", - agg->frame_count, agg->start_idx, idx); - - info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); - info->status.rates[0].count = tx_resp->failure_frame + 1; - info->flags &= ~IEEE80211_TX_CTL_AMPDU; - info->flags |= iwl4965_tx_status_to_mac80211(status); - iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info); - - IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", - status & 0xff, tx_resp->failure_frame); - IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); - - agg->wait_for_ba = 0; - } else { - /* Two or more frames were attempted; expect block-ack */ - u64 bitmap = 0; - int start = agg->start_idx; - - /* Construct bit-map of pending frames within Tx window */ - for (i = 0; i < agg->frame_count; i++) { - u16 sc; - status = le16_to_cpu(frame_status[i].status); - seq = le16_to_cpu(frame_status[i].sequence); - idx = SEQ_TO_INDEX(seq); - txq_id = SEQ_TO_QUEUE(seq); - - if (status & (AGG_TX_STATE_FEW_BYTES_MSK | - AGG_TX_STATE_ABORT_MSK)) - continue; - - IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", - agg->frame_count, txq_id, idx); - - hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx); - if (!hdr) { - IWL_ERR(priv, - "BUG_ON idx doesn't point to valid skb" - " idx=%d, txq_id=%d\n", idx, txq_id); - return -1; - } - - sc = le16_to_cpu(hdr->seq_ctrl); - if (idx != (SEQ_TO_SN(sc) & 0xff)) { - IWL_ERR(priv, - "BUG_ON idx doesn't match seq control" - " idx=%d, seq_idx=%d, seq=%d\n", - idx, SEQ_TO_SN(sc), hdr->seq_ctrl); - return -1; - } - - IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", - i, idx, SEQ_TO_SN(sc)); - - sh = idx - start; - if (sh > 64) { - sh = (start - idx) + 0xff; - bitmap = bitmap << sh; - sh = 0; - start = idx; - } else if (sh < -64) - sh = 0xff - (start - idx); - else if (sh < 0) { - sh = start - idx; - start = idx; - bitmap = bitmap << sh; - sh = 0; - } - bitmap |= 1ULL << sh; - IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", - start, (unsigned long long)bitmap); - } - - agg->bitmap = bitmap; - agg->start_idx = start; - IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", - agg->frame_count, agg->start_idx, - (unsigned long long)agg->bitmap); - - if (bitmap) - agg->wait_for_ba = 1; - } - return 0; -} - -static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr) -{ - int i; - int start = 0; - int ret = IWL_INVALID_STATION; - unsigned long flags; - - if ((priv->iw_mode == NL80211_IFTYPE_ADHOC)) - start = IWL_STA_ID; - - if (is_broadcast_ether_addr(addr)) - return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; - - spin_lock_irqsave(&priv->sta_lock, flags); - for (i = start; i < priv->hw_params.max_stations; i++) - if (priv->stations[i].used && - (!compare_ether_addr(priv->stations[i].sta.sta.addr, - addr))) { - ret = i; - goto out; - } - - IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n", - addr, priv->num_stations); - - out: - /* - * It may be possible that more commands interacting with stations - * arrive before we completed processing the adding of - * station - */ - if (ret != IWL_INVALID_STATION && - (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) || - ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) && - (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) { - IWL_ERR(priv, "Requested station info for sta %d before ready.\n", - ret); - ret = IWL_INVALID_STATION; - } - 
spin_unlock_irqrestore(&priv->sta_lock, flags); - return ret; -} - -static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) -{ - if (priv->iw_mode == NL80211_IFTYPE_STATION) { - return IWL_AP_ID; - } else { - u8 *da = ieee80211_get_DA(hdr); - return iwl4965_find_station(priv, da); - } -} - -/** - * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response - */ -static void iwl4965_rx_reply_tx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int index = SEQ_TO_INDEX(sequence); - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct ieee80211_hdr *hdr; - struct ieee80211_tx_info *info; - struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; - u32 status = le32_to_cpu(tx_resp->u.status); - int uninitialized_var(tid); - int sta_id; - int freed; - u8 *qc = NULL; - unsigned long flags; - - if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " - "is out of range [0-%d] %d %d\n", txq_id, - index, txq->q.n_bd, txq->q.write_ptr, - txq->q.read_ptr); - return; - } - - txq->time_stamp = jiffies; - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); - memset(&info->status, 0, sizeof(info->status)); - - hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index); - if (ieee80211_is_data_qos(hdr->frame_control)) { - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } - - sta_id = iwl4965_get_ra_sta_id(priv, hdr); - if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { - IWL_ERR(priv, "Station not known\n"); - return; - } - - spin_lock_irqsave(&priv->sta_lock, flags); - if (txq->sched_retry) { - const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); - struct iwl_ht_agg *agg = NULL; - WARN_ON(!qc); - - agg = &priv->stations[sta_id].tid[tid].agg; - - iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); - - /* check if BAR is needed */ - if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status)) - info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - - if (txq->q.read_ptr != (scd_ssn & 0xff)) { - index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff, - txq->q.n_bd); - IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " - "%d index %d\n", scd_ssn , index); - freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); - if (qc) - iwl4965_free_tfds_in_queue(priv, sta_id, - tid, freed); - - if (priv->mac80211_registered && - (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) - && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) - iwl_legacy_wake_queue(priv, txq); - } - } else { - info->status.rates[0].count = tx_resp->failure_frame + 1; - info->flags |= iwl4965_tx_status_to_mac80211(status); - iwl4965_hwrate_to_tx_control(priv, - le32_to_cpu(tx_resp->rate_n_flags), - info); - - IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " - "rate_n_flags 0x%x retries %d\n", - txq_id, - iwl4965_get_tx_fail_reason(status), status, - le32_to_cpu(tx_resp->rate_n_flags), - tx_resp->failure_frame); - - freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); - if (qc && likely(sta_id != IWL_INVALID_STATION)) - iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); - else if (sta_id == IWL_INVALID_STATION) - IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); - - if (priv->mac80211_registered && - (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)) - iwl_legacy_wake_queue(priv, txq); - } - if (qc && likely(sta_id != IWL_INVALID_STATION)) - 
iwl4965_txq_check_empty(priv, sta_id, tid, txq_id); - - iwl4965_check_abort_status(priv, tx_resp->frame_count, status); - - spin_unlock_irqrestore(&priv->sta_lock, flags); -} - -static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw; - u8 rate __maybe_unused = - iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); - - IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " - "tsf:0x%.8x%.8x rate:%d\n", - le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), - le32_to_cpu(beacon->low_tsf), rate); - - priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); -} - -/* Set up 4965-specific Rx frame reply handlers */ -static void iwl4965_rx_handler_setup(struct iwl_priv *priv) -{ - /* Legacy Rx frames */ - priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; - /* Tx response */ - priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; - priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; -} - -static struct iwl_hcmd_ops iwl4965_hcmd = { - .rxon_assoc = iwl4965_send_rxon_assoc, - .commit_rxon = iwl4965_commit_rxon, - .set_rxon_chain = iwl4965_set_rxon_chain, -}; - -static void iwl4965_post_scan(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - /* - * Since setting the RXON may have been deferred while - * performing the scan, fire one off if needed - */ - if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - iwl_legacy_commit_rxon(priv, ctx); -} - -static void iwl4965_post_associate(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_vif *vif = ctx->vif; - struct ieee80211_conf *conf = NULL; - int ret = 0; - - if (!vif || !priv->is_open) - return; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - iwl_legacy_scan_cancel_timeout(priv, 200); - - conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); - - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl_legacy_commit_rxon(priv, ctx); - - ret = iwl_legacy_send_rxon_timing(priv, ctx); - if (ret) - IWL_WARN(priv, "RXON timing - " - "Attempting to continue.\n"); - - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - - iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); - - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - - ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); - - IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", - vif->bss_conf.aid, vif->bss_conf.beacon_int); - - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - } - - iwl_legacy_commit_rxon(priv, ctx); - - IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", - vif->bss_conf.aid, ctx->active.bssid_addr); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - break; - case NL80211_IFTYPE_ADHOC: - iwl4965_send_beacon_cmd(priv); - break; - default: - IWL_ERR(priv, "%s Should not be called in %d mode\n", - __func__, vif->type); - break; - } - - /* the chain noise calibration will enabled PM upon 
completion - * If chain noise has already been run, then we need to enable - * power management here */ - if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) - iwl_legacy_power_update_mode(priv, false); - - /* Enable Rx differential gain and sensitivity calibrations */ - iwl4965_chain_noise_reset(priv); - priv->start_calib = 1; -} - -static void iwl4965_config_ap(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_vif *vif = ctx->vif; - int ret = 0; - - lockdep_assert_held(&priv->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* The following should be done only at AP bring up */ - if (!iwl_legacy_is_associated_ctx(ctx)) { - - /* RXON - unassoc (to set timing command) */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl_legacy_commit_rxon(priv, ctx); - - /* RXON Timing */ - ret = iwl_legacy_send_rxon_timing(priv, ctx); - if (ret) - IWL_WARN(priv, "RXON timing failed - " - "Attempting to continue.\n"); - - /* AP has all antennas */ - priv->chain_noise_data.active_chains = - priv->hw_params.valid_rx_ant; - iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - - ctx->staging.assoc_id = 0; - - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= - RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= - ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.use_short_slot) - ctx->staging.flags |= - RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= - ~RXON_FLG_SHORT_SLOT_MSK; - } - /* need to send beacon cmd before committing assoc RXON! */ - iwl4965_send_beacon_cmd(priv); - /* restore RXON assoc */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - iwl_legacy_commit_rxon(priv, ctx); - } - iwl4965_send_beacon_cmd(priv); -} - -static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { - .get_hcmd_size = iwl4965_get_hcmd_size, - .build_addsta_hcmd = iwl4965_build_addsta_hcmd, - .request_scan = iwl4965_request_scan, - .post_scan = iwl4965_post_scan, -}; - -static struct iwl_lib_ops iwl4965_lib = { - .set_hw_params = iwl4965_hw_set_hw_params, - .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, - .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl4965_hw_txq_free_tfd, - .txq_init = iwl4965_hw_tx_queue_init, - .rx_handler_setup = iwl4965_rx_handler_setup, - .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, - .init_alive_start = iwl4965_init_alive_start, - .load_ucode = iwl4965_load_bsm, - .dump_nic_error_log = iwl4965_dump_nic_error_log, - .dump_fh = iwl4965_dump_fh, - .set_channel_switch = iwl4965_hw_channel_switch, - .apm_ops = { - .init = iwl_legacy_apm_init, - .config = iwl4965_nic_config, - }, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REGULATORY_BAND_1_CHANNELS, - EEPROM_REGULATORY_BAND_2_CHANNELS, - EEPROM_REGULATORY_BAND_3_CHANNELS, - EEPROM_REGULATORY_BAND_4_CHANNELS, - EEPROM_REGULATORY_BAND_5_CHANNELS, - EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, - EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS - }, - .acquire_semaphore = iwl4965_eeprom_acquire_semaphore, - .release_semaphore = iwl4965_eeprom_release_semaphore, - }, - .send_tx_power = iwl4965_send_tx_power, - .update_chain_flags = iwl4965_update_chain_flags, - .temp_ops = { - .temperature = iwl4965_temperature_calib, - }, - .debugfs_ops = { - .rx_stats_read = iwl4965_ucode_rx_stats_read, - .tx_stats_read = 
iwl4965_ucode_tx_stats_read, - .general_stats_read = iwl4965_ucode_general_stats_read, - }, -}; - -static const struct iwl_legacy_ops iwl4965_legacy_ops = { - .post_associate = iwl4965_post_associate, - .config_ap = iwl4965_config_ap, - .manage_ibss_station = iwl4965_manage_ibss_station, - .update_bcast_stations = iwl4965_update_bcast_stations, -}; - -struct ieee80211_ops iwl4965_hw_ops = { - .tx = iwl4965_mac_tx, - .start = iwl4965_mac_start, - .stop = iwl4965_mac_stop, - .add_interface = iwl_legacy_mac_add_interface, - .remove_interface = iwl_legacy_mac_remove_interface, - .change_interface = iwl_legacy_mac_change_interface, - .config = iwl_legacy_mac_config, - .configure_filter = iwl4965_configure_filter, - .set_key = iwl4965_mac_set_key, - .update_tkip_key = iwl4965_mac_update_tkip_key, - .conf_tx = iwl_legacy_mac_conf_tx, - .reset_tsf = iwl_legacy_mac_reset_tsf, - .bss_info_changed = iwl_legacy_mac_bss_info_changed, - .ampdu_action = iwl4965_mac_ampdu_action, - .hw_scan = iwl_legacy_mac_hw_scan, - .sta_add = iwl4965_mac_sta_add, - .sta_remove = iwl_legacy_mac_sta_remove, - .channel_switch = iwl4965_mac_channel_switch, - .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, -}; - -static const struct iwl_ops iwl4965_ops = { - .lib = &iwl4965_lib, - .hcmd = &iwl4965_hcmd, - .utils = &iwl4965_hcmd_utils, - .led = &iwl4965_led_ops, - .legacy = &iwl4965_legacy_ops, - .ieee80211_ops = &iwl4965_hw_ops, -}; - -static struct iwl_base_params iwl4965_base_params = { - .eeprom_size = IWL4965_EEPROM_IMG_SIZE, - .num_of_queues = IWL49_NUM_QUEUES, - .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, - .pll_cfg_val = 0, - .set_l0s = true, - .use_bsm = true, - .led_compensation = 61, - .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, - .wd_timeout = IWL_DEF_WD_TIMEOUT, - .temperature_kelvin = true, - .ucode_tracing = true, - .sensitivity_calib_by_driver = true, - .chain_noise_calib_by_driver = true, -}; - -struct iwl_cfg iwl4965_cfg = { - .name = "Intel(R) Wireless WiFi Link 4965AGN", - .fw_name_pre = IWL4965_FW_PRE, - .ucode_api_max = IWL4965_UCODE_API_MAX, - .ucode_api_min = IWL4965_UCODE_API_MIN, - .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, - .valid_tx_ant = ANT_AB, - .valid_rx_ant = ANT_ABC, - .eeprom_ver = EEPROM_4965_EEPROM_VERSION, - .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, - .ops = &iwl4965_ops, - .mod_params = &iwl4965_mod_params, - .base_params = &iwl4965_base_params, - .led_mode = IWL_LED_BLINK, - /* - * Force use of chains B and C for scan RX on 5 GHz band - * because the device has off-channel reception on chain A. - */ - .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, -}; - -/* Module firmware */ -MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h deleted file mode 100644 index 01f8163daf1..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965.h +++ /dev/null @@ -1,282 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __iwl_4965_h__ -#define __iwl_4965_h__ - -#include "iwl-dev.h" - -/* configuration for the _4965 devices */ -extern struct iwl_cfg iwl4965_cfg; - -extern struct iwl_mod_params iwl4965_mod_params; - -extern struct ieee80211_ops iwl4965_hw_ops; - -/* tx queue */ -void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, - int sta_id, int tid, int freed); - -/* RXON */ -void iwl4965_set_rxon_chain(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); - -/* uCode */ -int iwl4965_verify_ucode(struct iwl_priv *priv); - -/* lib */ -void iwl4965_check_abort_status(struct iwl_priv *priv, - u8 frame_count, u32 status); - -void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -int iwl4965_hw_nic_init(struct iwl_priv *priv); -int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display); - -/* rx */ -void iwl4965_rx_queue_restock(struct iwl_priv *priv); -void iwl4965_rx_replenish(struct iwl_priv *priv); -void iwl4965_rx_replenish_now(struct iwl_priv *priv); -void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -int iwl4965_rxq_stop(struct iwl_priv *priv); -int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); -void iwl4965_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl4965_rx_handle(struct iwl_priv *priv); - -/* tx */ -void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); -int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, u8 pad); -int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, - struct iwl_tx_queue *txq); -void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, - struct ieee80211_tx_info *info); -int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); -int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 *ssn); -int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid); -int iwl4965_txq_check_empty(struct iwl_priv *priv, - int sta_id, u8 tid, int txq_id); -void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); -void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv); -int iwl4965_txq_ctx_alloc(struct iwl_priv *priv); -void iwl4965_txq_ctx_reset(struct iwl_priv *priv); -void iwl4965_txq_ctx_stop(struct iwl_priv *priv); -void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask); - -/* - * Acquire priv->lock before calling this function ! - */ -void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index); -/** - * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue - * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed - * @scd_retry: (1) Indicates queue will be used in aggregation mode - * - * NOTE: Acquire priv->lock before calling this function ! 
- */ -void iwl4965_tx_queue_set_status(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry); - -static inline u32 iwl4965_tx_status_to_mac80211(u32 status) -{ - status &= TX_STATUS_MSK; - - switch (status) { - case TX_STATUS_SUCCESS: - case TX_STATUS_DIRECT_DONE: - return IEEE80211_TX_STAT_ACK; - case TX_STATUS_FAIL_DEST_PS: - return IEEE80211_TX_STAT_TX_FILTERED; - default: - return 0; - } -} - -static inline bool iwl4965_is_tx_success(u32 status) -{ - status &= TX_STATUS_MSK; - return (status == TX_STATUS_SUCCESS) || - (status == TX_STATUS_DIRECT_DONE); -} - -u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); - -/* rx */ -void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -bool iwl4965_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); -void iwl4965_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl4965_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - -/* scan */ -int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); - -/* station mgmt */ -int iwl4965_manage_ibss_station(struct iwl_priv *priv, - struct ieee80211_vif *vif, bool add); - -/* hcmd */ -int iwl4965_send_beacon_cmd(struct iwl_priv *priv); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -const char *iwl4965_get_tx_fail_reason(u32 status); -#else -static inline const char * -iwl4965_get_tx_fail_reason(u32 status) { return ""; } -#endif - -/* station management */ -int iwl4965_alloc_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwl4965_add_bssid_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr, u8 *sta_id_r); -int iwl4965_remove_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key); -int iwl4965_set_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key); -int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwl4965_set_dynamic_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key, u8 sta_id); -int iwl4965_remove_dynamic_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key, u8 sta_id); -void iwl4965_update_tkip_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); -int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, - int sta_id, int tid); -int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid, u16 ssn); -int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid); -void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, - int sta_id, int cnt); -int iwl4965_update_bcast_stations(struct iwl_priv *priv); - -/* rate */ -static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx) -{ - return BIT(ant_idx) << RATE_MCS_ANT_POS; -} - -static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags) -{ - return le32_to_cpu(rate_n_flags) & 0xFF; -} - -static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags) -{ - return cpu_to_le32(flags|(u32)rate); -} - -/* eeprom */ -void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); -int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv); -void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv); -int iwl4965_eeprom_check_version(struct iwl_priv *priv); 
- -/* mac80211 handlers (for 4965) */ -void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); -int iwl4965_mac_start(struct ieee80211_hw *hw); -void iwl4965_mac_stop(struct ieee80211_hw *hw); -void iwl4965_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast); -int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key); -void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key); -int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size); -int iwl4965_mac_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch); - -#endif /* __iwl_4965_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c deleted file mode 100644 index 2bd5659310d..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ /dev/null @@ -1,2661 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/etherdevice.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <net/mac80211.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-debug.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-power.h" -#include "iwl-sta.h" -#include "iwl-helpers.h" - - -MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); -MODULE_VERSION(IWLWIFI_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - -/* - * set bt_coex_active to true, uCode will do kill/defer - * every time the priority line is asserted (BT is sending signals on the - * priority line in the PCIx). - * set bt_coex_active to false, uCode will ignore the BT activity and - * perform the normal operation - * - * User might experience transmit issue on some platform due to WiFi/BT - * co-exist problem. 
The possible behaviors are: - * Able to scan and finding all the available AP - * Not able to associate with any AP - * On those platforms, WiFi communication can be restored by set - * "bt_coex_active" module parameter to "false" - * - * default: bt_coex_active = true (BT_COEX_ENABLE) - */ -static bool bt_coex_active = true; -module_param(bt_coex_active, bool, S_IRUGO); -MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); - -u32 iwlegacy_debug_level; -EXPORT_SYMBOL(iwlegacy_debug_level); - -const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; -EXPORT_SYMBOL(iwlegacy_bcast_addr); - - -/* This function both allocates and initializes hw and priv. */ -struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg) -{ - struct iwl_priv *priv; - /* mac80211 allocates memory for this device instance, including - * space for this driver's private structure */ - struct ieee80211_hw *hw; - - hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), - cfg->ops->ieee80211_ops); - if (hw == NULL) { - pr_err("%s: Can not allocate network device\n", - cfg->name); - goto out; - } - - priv = hw->priv; - priv->hw = hw; - -out: - return hw; -} -EXPORT_SYMBOL(iwl_legacy_alloc_all); - -#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ -#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ -static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv, - struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) -{ - u16 max_bit_rate = 0; - u8 rx_chains_num = priv->hw_params.rx_chains_num; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - - ht_info->cap = 0; - memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); - - ht_info->ht_supported = true; - - ht_info->cap |= IEEE80211_HT_CAP_SGI_20; - max_bit_rate = MAX_BIT_RATE_20_MHZ; - if (priv->hw_params.ht40_channel & BIT(band)) { - ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_info->cap |= IEEE80211_HT_CAP_SGI_40; - ht_info->mcs.rx_mask[4] = 0x01; - max_bit_rate = MAX_BIT_RATE_40_MHZ; - } - - if (priv->cfg->mod_params->amsdu_size_8K) - ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - - ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; - ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; - - ht_info->mcs.rx_mask[0] = 0xFF; - if (rx_chains_num >= 2) - ht_info->mcs.rx_mask[1] = 0xFF; - if (rx_chains_num >= 3) - ht_info->mcs.rx_mask[2] = 0xFF; - - /* Highest supported Rx data rate */ - max_bit_rate *= rx_chains_num; - WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); - ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); - - /* Tx MCS capabilities */ - ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (tx_chains_num != rx_chains_num) { - ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; - ht_info->mcs.tx_params |= ((tx_chains_num - 1) << - IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); - } -} - -/** - * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom - */ -int iwl_legacy_init_geos(struct iwl_priv *priv) -{ - struct iwl_channel_info *ch; - struct ieee80211_supported_band *sband; - struct ieee80211_channel *channels; - struct ieee80211_channel *geo_ch; - struct ieee80211_rate *rates; - int i = 0; - s8 max_tx_power = 0; - - if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || - priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { - IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); - set_bit(STATUS_GEO_CONFIGURED, &priv->status); - return 0; - } - - channels = kzalloc(sizeof(struct ieee80211_channel) * - priv->channel_count, GFP_KERNEL); - if (!channels) - return -ENOMEM; - - rates = 
kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY), - GFP_KERNEL); - if (!rates) { - kfree(channels); - return -ENOMEM; - } - - /* 5.2GHz channels start after the 2.4GHz channels */ - sband = &priv->bands[IEEE80211_BAND_5GHZ]; - sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)]; - /* just OFDM */ - sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; - sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; - - if (priv->cfg->sku & IWL_SKU_N) - iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, - IEEE80211_BAND_5GHZ); - - sband = &priv->bands[IEEE80211_BAND_2GHZ]; - sband->channels = channels; - /* OFDM & CCK */ - sband->bitrates = rates; - sband->n_bitrates = IWL_RATE_COUNT_LEGACY; - - if (priv->cfg->sku & IWL_SKU_N) - iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, - IEEE80211_BAND_2GHZ); - - priv->ieee_channels = channels; - priv->ieee_rates = rates; - - for (i = 0; i < priv->channel_count; i++) { - ch = &priv->channel_info[i]; - - if (!iwl_legacy_is_channel_valid(ch)) - continue; - - sband = &priv->bands[ch->band]; - - geo_ch = &sband->channels[sband->n_channels++]; - - geo_ch->center_freq = - ieee80211_channel_to_frequency(ch->channel, ch->band); - geo_ch->max_power = ch->max_power_avg; - geo_ch->max_antenna_gain = 0xff; - geo_ch->hw_value = ch->channel; - - if (iwl_legacy_is_channel_valid(ch)) { - if (!(ch->flags & EEPROM_CHANNEL_IBSS)) - geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; - - if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) - geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; - - if (ch->flags & EEPROM_CHANNEL_RADAR) - geo_ch->flags |= IEEE80211_CHAN_RADAR; - - geo_ch->flags |= ch->ht40_extension_channel; - - if (ch->max_power_avg > max_tx_power) - max_tx_power = ch->max_power_avg; - } else { - geo_ch->flags |= IEEE80211_CHAN_DISABLED; - } - - IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", - ch->channel, geo_ch->center_freq, - iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4", - geo_ch->flags & IEEE80211_CHAN_DISABLED ? - "restricted" : "valid", - geo_ch->flags); - } - - priv->tx_power_device_lmt = max_tx_power; - priv->tx_power_user_lmt = max_tx_power; - priv->tx_power_next = max_tx_power; - - if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && - priv->cfg->sku & IWL_SKU_A) { - IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" - "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", - priv->pci_dev->device, - priv->pci_dev->subsystem_device); - priv->cfg->sku &= ~IWL_SKU_A; - } - - IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", - priv->bands[IEEE80211_BAND_2GHZ].n_channels, - priv->bands[IEEE80211_BAND_5GHZ].n_channels); - - set_bit(STATUS_GEO_CONFIGURED, &priv->status); - - return 0; -} -EXPORT_SYMBOL(iwl_legacy_init_geos); - -/* - * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos - */ -void iwl_legacy_free_geos(struct iwl_priv *priv) -{ - kfree(priv->ieee_channels); - kfree(priv->ieee_rates); - clear_bit(STATUS_GEO_CONFIGURED, &priv->status); -} -EXPORT_SYMBOL(iwl_legacy_free_geos); - -static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv, - enum ieee80211_band band, - u16 channel, u8 extension_chan_offset) -{ - const struct iwl_channel_info *ch_info; - - ch_info = iwl_legacy_get_channel_info(priv, band, channel); - if (!iwl_legacy_is_channel_valid(ch_info)) - return false; - - if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) - return !(ch_info->ht40_extension_channel & - IEEE80211_CHAN_NO_HT40PLUS); - else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) - return !(ch_info->ht40_extension_channel & - IEEE80211_CHAN_NO_HT40MINUS); - - return false; -} - -bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap) -{ - if (!ctx->ht.enabled || !ctx->ht.is_40mhz) - return false; - - /* - * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 - * the bit will not set if it is pure 40MHz case - */ - if (ht_cap && !ht_cap->ht_supported) - return false; - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - if (priv->disable_ht40) - return false; -#endif - - return iwl_legacy_is_channel_extension(priv, priv->band, - le16_to_cpu(ctx->staging.channel), - ctx->ht.extension_chan_offset); -} -EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed); - -static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) -{ - u16 new_val; - u16 beacon_factor; - - /* - * If mac80211 hasn't given us a beacon interval, program - * the default into the device. - */ - if (!beacon_val) - return DEFAULT_BEACON_INTERVAL; - - /* - * If the beacon interval we obtained from the peer - * is too large, we'll have to wake up more often - * (and in IBSS case, we'll beacon too much) - * - * For example, if max_beacon_val is 4096, and the - * requested beacon interval is 7000, we'll have to - * use 3500 to be able to wake up on the beacons. - * - * This could badly influence beacon detection stats. - */ - - beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; - new_val = beacon_val / beacon_factor; - - if (!new_val) - new_val = max_beacon_val; - - return new_val; -} - -int -iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - u64 tsf; - s32 interval_tm, rem; - struct ieee80211_conf *conf = NULL; - u16 beacon_int; - struct ieee80211_vif *vif = ctx->vif; - - conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); - - lockdep_assert_held(&priv->mutex); - - memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); - - ctx->timing.timestamp = cpu_to_le64(priv->timestamp); - ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); - - beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; - - /* - * TODO: For IBSS we need to get atim_window from mac80211, - * for now just always use 0 - */ - ctx->timing.atim_window = 0; - - beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int, - priv->hw_params.max_beacon_itrvl * TIME_UNIT); - ctx->timing.beacon_interval = cpu_to_le16(beacon_int); - - tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ - interval_tm = beacon_int * TIME_UNIT; - rem = do_div(tsf, interval_tm); - ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); - - ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1; - - IWL_DEBUG_ASSOC(priv, - "beacon interval %d beacon timer %d beacon tim %d\n", - le16_to_cpu(ctx->timing.beacon_interval), - le32_to_cpu(ctx->timing.beacon_init_val), - le16_to_cpu(ctx->timing.atim_window)); - - return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd, - sizeof(ctx->timing), &ctx->timing); -} -EXPORT_SYMBOL(iwl_legacy_send_rxon_timing); - -void -iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - int hw_decrypt) -{ - struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; - - if (hw_decrypt) - rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; - else - rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; - -} -EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto); - -/* validate RXON structure is valid */ -int -iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; - bool error = false; - - if (rxon->flags & RXON_FLG_BAND_24G_MSK) { - if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { - IWL_WARN(priv, "check 2.4G: wrong narrow\n"); - error = true; - } - if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { - IWL_WARN(priv, "check 2.4G: wrong radar\n"); - error = true; - } - } else { - if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { - IWL_WARN(priv, "check 5.2G: not short slot!\n"); - error = true; - } - if (rxon->flags & RXON_FLG_CCK_MSK) { - IWL_WARN(priv, "check 5.2G: CCK!\n"); - error = true; - } - } - if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { - IWL_WARN(priv, "mac/bssid mcast!\n"); - error = true; - } - - /* make sure basic rates 6Mbps and 1Mbps are supported */ - if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && - (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { - IWL_WARN(priv, "neither 1 nor 6 are basic\n"); - error = true; - } - - if (le16_to_cpu(rxon->assoc_id) > 2007) { - IWL_WARN(priv, "aid > 2007\n"); - error = true; - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) - == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { - IWL_WARN(priv, "CCK and short slot\n"); - error = true; - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) - == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { - IWL_WARN(priv, "CCK and auto detect"); - error = true; - } - - if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | - RXON_FLG_TGG_PROTECT_MSK)) == - RXON_FLG_TGG_PROTECT_MSK) { - IWL_WARN(priv, "TGg but no auto-detect\n"); - error = true; - } - - if (error) - IWL_WARN(priv, "Tuning to channel %d\n", - le16_to_cpu(rxon->channel)); - - if (error) { - IWL_ERR(priv, "Invalid RXON\n"); - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd); - -/** - * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed - * @priv: staging_rxon is compared to active_rxon - * - * If the RXON structure is changing enough to require a new tune, - * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to 
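iwl_legacy_adjust_beacon_interval above (used by iwl_legacy_send_rxon_timing) clamps a peer-supplied beacon interval by dividing it by the smallest integer factor that brings it under the device maximum; the code's own comment gives the example of 7000 clamped against 4096 yielding 3500. A standalone sketch of just that arithmetic, not part of this diff; the default-interval value is a placeholder for the example:

#include <stdio.h>

#define DEFAULT_BEACON_INTERVAL 200 /* placeholder default, in time units */

static unsigned short adjust_beacon_interval(unsigned short beacon_val,
					     unsigned short max_beacon_val)
{
	unsigned short beacon_factor, new_val;

	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/* smallest divisor that brings the interval under the device limit */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

int main(void)
{
	printf("%u\n", adjust_beacon_interval(7000, 4096)); /* prints 3500 */
	return 0;
}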
indicate that - * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. - */ -int iwl_legacy_full_rxon_required(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - const struct iwl_legacy_rxon_cmd *staging = &ctx->staging; - const struct iwl_legacy_rxon_cmd *active = &ctx->active; - -#define CHK(cond) \ - if ((cond)) { \ - IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ - return 1; \ - } - -#define CHK_NEQ(c1, c2) \ - if ((c1) != (c2)) { \ - IWL_DEBUG_INFO(priv, "need full RXON - " \ - #c1 " != " #c2 " - %d != %d\n", \ - (c1), (c2)); \ - return 1; \ - } - - /* These items are only settable from the full RXON command */ - CHK(!iwl_legacy_is_associated_ctx(ctx)); - CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); - CHK(compare_ether_addr(staging->node_addr, active->node_addr)); - CHK(compare_ether_addr(staging->wlap_bssid_addr, - active->wlap_bssid_addr)); - CHK_NEQ(staging->dev_type, active->dev_type); - CHK_NEQ(staging->channel, active->channel); - CHK_NEQ(staging->air_propagation, active->air_propagation); - CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, - active->ofdm_ht_single_stream_basic_rates); - CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, - active->ofdm_ht_dual_stream_basic_rates); - CHK_NEQ(staging->assoc_id, active->assoc_id); - - /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can - * be updated with the RXON_ASSOC command -- however only some - * flag transitions are allowed using RXON_ASSOC */ - - /* Check if we are not switching bands */ - CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, - active->flags & RXON_FLG_BAND_24G_MSK); - - /* Check if we are switching association toggle */ - CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, - active->filter_flags & RXON_FILTER_ASSOC_MSK); - -#undef CHK -#undef CHK_NEQ - - return 0; -} -EXPORT_SYMBOL(iwl_legacy_full_rxon_required); - -u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - /* - * Assign the lowest rate -- should really get this from - * the beacon skb from mac80211. 
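The CHK()/CHK_NEQ() macros in iwl_legacy_full_rxon_required above rely on preprocessor stringification so every early "need full RXON" return logs the literal condition that triggered it. A small self-contained illustration of the same pattern; the macro and structure names here are generic, not the driver's:

#include <stdio.h>
#include <string.h>

/* Log the literal condition text via #cond, then report a full update is needed. */
#define NEED_FULL(cond)						\
	do {							\
		if (cond) {					\
			printf("need full update - %s\n", #cond); \
			return 1;				\
		}						\
	} while (0)

struct cfg { int channel; unsigned char bssid[6]; };

static int full_update_required(const struct cfg *staging, const struct cfg *active)
{
	NEED_FULL(staging->channel != active->channel);
	NEED_FULL(memcmp(staging->bssid, active->bssid, 6));
	return 0;
}

int main(void)
{
	struct cfg active = { .channel = 6 }, staging = { .channel = 11 };

	/* logs "need full update - staging->channel != active->channel" */
	return full_update_required(&staging, &active) ? 0 : 1;
}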
- */ - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) - return IWL_RATE_1M_PLCP; - else - return IWL_RATE_6M_PLCP; -} -EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp); - -static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv, - struct iwl_ht_config *ht_conf, - struct iwl_rxon_context *ctx) -{ - struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; - - if (!ctx->ht.enabled) { - rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | - RXON_FLG_HT40_PROT_MSK | - RXON_FLG_HT_PROT_MSK); - return; - } - - rxon->flags |= cpu_to_le32(ctx->ht.protection << - RXON_FLG_HT_OPERATING_MODE_POS); - - /* Set up channel bandwidth: - * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ - /* clear the HT channel mode before set the mode */ - rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) { - /* pure ht40 */ - if (ctx->ht.protection == - IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { - rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; - /* Note: control channel is opposite of extension channel */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= - ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - } - } else { - /* Note: control channel is opposite of extension channel */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= - ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_NONE: - default: - /* channel location only valid if in Mixed mode */ - IWL_ERR(priv, - "invalid extension channel offset\n"); - break; - } - } - } else { - rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; - } - - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - - IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " - "extension channel offset 0x%x\n", - le32_to_cpu(rxon->flags), ctx->ht.protection, - ctx->ht.extension_chan_offset); -} - -void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) -{ - struct iwl_rxon_context *ctx; - - for_each_context(priv, ctx) - _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx); -} -EXPORT_SYMBOL(iwl_legacy_set_rxon_ht); - -/* Return valid, unused, channel for a passive scan to reset the RF */ -u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band) -{ - const struct iwl_channel_info *ch_info; - int i; - u8 channel = 0; - u8 min, max; - struct iwl_rxon_context *ctx; - - if (band == IEEE80211_BAND_5GHZ) { - min = 14; - max = priv->channel_count; - } else { - min = 0; - max = 14; - } - - for (i = min; i < max; i++) { - bool busy = false; - - for_each_context(priv, ctx) { - busy = priv->channel_info[i].channel == - le16_to_cpu(ctx->staging.channel); - if (busy) - break; - } - - if (busy) - continue; - - channel = priv->channel_info[i].channel; - ch_info = iwl_legacy_get_channel_info(priv, band, channel); - if (iwl_legacy_is_channel_valid(ch_info)) - break; - } - - return channel; -} -EXPORT_SYMBOL(iwl_legacy_get_single_channel_number); - -/** - * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON - * @ch: requested channel as a pointer to struct ieee80211_channel - - 
* NOTE: Does not commit to the hardware; it sets appropriate bit fields - * in the staging RXON flag structure based on the ch->band - */ -int -iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, - struct iwl_rxon_context *ctx) -{ - enum ieee80211_band band = ch->band; - u16 channel = ch->hw_value; - - if ((le16_to_cpu(ctx->staging.channel) == channel) && - (priv->band == band)) - return 0; - - ctx->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) - ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; - else - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - - priv->band = band; - - IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); - - return 0; -} -EXPORT_SYMBOL(iwl_legacy_set_rxon_channel); - -void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - enum ieee80211_band band, - struct ieee80211_vif *vif) -{ - if (band == IEEE80211_BAND_5GHZ) { - ctx->staging.flags &= - ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK - | RXON_FLG_CCK_MSK); - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - } else { - /* Copied from iwl_post_associate() */ - if (vif && vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; - ctx->staging.flags &= ~RXON_FLG_CCK_MSK; - } -} -EXPORT_SYMBOL(iwl_legacy_set_flags_for_band); - -/* - * initialize rxon structure with default values from eeprom - */ -void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - const struct iwl_channel_info *ch_info; - - memset(&ctx->staging, 0, sizeof(ctx->staging)); - - if (!ctx->vif) { - ctx->staging.dev_type = ctx->unused_devtype; - } else - switch (ctx->vif->type) { - - case NL80211_IFTYPE_STATION: - ctx->staging.dev_type = ctx->station_devtype; - ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; - break; - - case NL80211_IFTYPE_ADHOC: - ctx->staging.dev_type = ctx->ibss_devtype; - ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; - ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | - RXON_FILTER_ACCEPT_GRP_MSK; - break; - - default: - IWL_ERR(priv, "Unsupported interface type %d\n", - ctx->vif->type); - break; - } - -#if 0 - /* TODO: Figure out when short_preamble would be set and cache from - * that */ - if (!hw_to_local(priv->hw)->short_preamble) - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; -#endif - - ch_info = iwl_legacy_get_channel_info(priv, priv->band, - le16_to_cpu(ctx->active.channel)); - - if (!ch_info) - ch_info = &priv->channel_info[0]; - - ctx->staging.channel = cpu_to_le16(ch_info->channel); - priv->band = ch_info->band; - - iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif); - - ctx->staging.ofdm_basic_rates = - (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; - ctx->staging.cck_basic_rates = - (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; - - /* clear both MIX and PURE40 mode flag */ - ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | - RXON_FLG_CHANNEL_MODE_PURE_40); - if (ctx->vif) - memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); - - ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; - ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; -} -EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config); - -void iwl_legacy_set_rate(struct iwl_priv *priv) -{ - const struct 
ieee80211_supported_band *hw = NULL; - struct ieee80211_rate *rate; - struct iwl_rxon_context *ctx; - int i; - - hw = iwl_get_hw_mode(priv, priv->band); - if (!hw) { - IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n"); - return; - } - - priv->active_rate = 0; - - for (i = 0; i < hw->n_bitrates; i++) { - rate = &(hw->bitrates[i]); - if (rate->hw_value < IWL_RATE_COUNT_LEGACY) - priv->active_rate |= (1 << rate->hw_value); - } - - IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate); - - for_each_context(priv, ctx) { - ctx->staging.cck_basic_rates = - (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; - - ctx->staging.ofdm_basic_rates = - (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; - } -} -EXPORT_SYMBOL(iwl_legacy_set_rate); - -void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - ieee80211_chswitch_done(ctx->vif, is_success); -} -EXPORT_SYMBOL(iwl_legacy_chswitch_done); - -void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_csa_notification *csa = &(pkt->u.csa_notif); - - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active; - - if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - return; - - if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { - rxon->channel = csa->channel; - ctx->staging.channel = csa->channel; - IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", - le16_to_cpu(csa->channel)); - iwl_legacy_chswitch_done(priv, true); - } else { - IWL_ERR(priv, "CSA notif (fail) : channel %d\n", - le16_to_cpu(csa->channel)); - iwl_legacy_chswitch_done(priv, false); - } -} -EXPORT_SYMBOL(iwl_legacy_rx_csa); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; - - IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); - iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); - IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", - le16_to_cpu(rxon->channel)); - IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); - IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", - le32_to_cpu(rxon->filter_flags)); - IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); - IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", - rxon->ofdm_basic_rates); - IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", - rxon->cck_basic_rates); - IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); - IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); - IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", - le16_to_cpu(rxon->assoc_id)); -} -EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd); -#endif -/** - * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card - */ -void iwl_legacy_irq_handle_error(struct iwl_priv *priv) -{ - /* Set the FW error flag -- cleared on iwl_down */ - set_bit(STATUS_FW_ERROR, &priv->status); - - /* Cancel currently queued command. 
*/ - clear_bit(STATUS_HCMD_ACTIVE, &priv->status); - - IWL_ERR(priv, "Loaded firmware version: %s\n", - priv->hw->wiphy->fw_version); - - priv->cfg->ops->lib->dump_nic_error_log(priv); - if (priv->cfg->ops->lib->dump_fh) - priv->cfg->ops->lib->dump_fh(priv, NULL, false); -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) - iwl_legacy_print_rx_config_cmd(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); -#endif - - wake_up(&priv->wait_command_queue); - - /* Keep the restart process from trying to send host - * commands by clearing the INIT status bit */ - clear_bit(STATUS_READY, &priv->status); - - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { - IWL_DEBUG(priv, IWL_DL_FW_ERRORS, - "Restarting adapter due to uCode error.\n"); - - if (priv->cfg->mod_params->restart_fw) - queue_work(priv->workqueue, &priv->restart); - } -} -EXPORT_SYMBOL(iwl_legacy_irq_handle_error); - -static int iwl_legacy_apm_stop_master(struct iwl_priv *priv) -{ - int ret = 0; - - /* stop device's busmaster DMA activity */ - iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); - - ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); - if (ret) - IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n"); - - IWL_DEBUG_INFO(priv, "stop master\n"); - - return ret; -} - -void iwl_legacy_apm_stop(struct iwl_priv *priv) -{ - IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); - - /* Stop device's DMA activity */ - iwl_legacy_apm_stop_master(priv); - - /* Reset the entire device */ - iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); - - /* - * Clear "initialization complete" bit to move adapter from - * D0A* (powered-up Active) --> D0U* (Uninitialized) state. - */ - iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); -} -EXPORT_SYMBOL(iwl_legacy_apm_stop); - - -/* - * Start up NIC's basic functionality after it has been reset - * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop()) - * NOTE: This does not load uCode nor start the embedded processor - */ -int iwl_legacy_apm_init(struct iwl_priv *priv) -{ - int ret = 0; - u16 lctl; - - IWL_DEBUG_INFO(priv, "Init card's basic functions\n"); - - /* - * Use "set_bit" below rather than "write", to preserve any hardware - * bits already set by default after reset. - */ - - /* Disable L0S exit timer (platform NMI Work/Around) */ - iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, - CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); - - /* - * Disable L0s without affecting L1; - * don't wait for ICH L0s (ICH bug W/A) - */ - iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, - CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); - - /* Set FH wait threshold to maximum (HW error during stress W/A) */ - iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG, - CSR_DBG_HPET_MEM_REG_VAL); - - /* - * Enable HAP INTA (interrupt from management bus) to - * wake device's PCI Express link L1a -> L0s - * NOTE: This is no-op for 3945 (non-existent bit) - */ - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); - - /* - * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. 
- */ - if (priv->cfg->base_params->set_l0s) { - lctl = iwl_legacy_pcie_link_ctl(priv); - if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == - PCI_CFG_LINK_CTRL_VAL_L1_EN) { - /* L1-ASPM enabled; disable(!) L0S */ - iwl_legacy_set_bit(priv, CSR_GIO_REG, - CSR_GIO_REG_VAL_L0S_ENABLED); - IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n"); - } else { - /* L1-ASPM disabled; enable(!) L0S */ - iwl_legacy_clear_bit(priv, CSR_GIO_REG, - CSR_GIO_REG_VAL_L0S_ENABLED); - IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n"); - } - } - - /* Configure analog phase-lock-loop before activating to D0A */ - if (priv->cfg->base_params->pll_cfg_val) - iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG, - priv->cfg->base_params->pll_cfg_val); - - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is supported, e.g. iwl_legacy_write_prph() - * and accesses to uCode SRAM. - */ - ret = iwl_poll_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); - if (ret < 0) { - IWL_DEBUG_INFO(priv, "Failed to init the card\n"); - goto out; - } - - /* - * Enable DMA and BSM (if used) clocks, wait for them to stabilize. - * BSM (Boostrap State Machine) is only in 3945 and 4965. - * - * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits - * do not disable clocks. This preserves any hardware bits already - * set by default in "CLK_CTRL_REG" after reset. - */ - if (priv->cfg->base_params->use_bsm) - iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, - APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); - else - iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, - APMG_CLK_VAL_DMA_CLK_RQT); - udelay(20); - - /* Disable L1-Active */ - iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_L1_ACT_DIS); - -out: - return ret; -} -EXPORT_SYMBOL(iwl_legacy_apm_init); - - -int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) -{ - int ret; - s8 prev_tx_power; - bool defer; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - lockdep_assert_held(&priv->mutex); - - if (priv->tx_power_user_lmt == tx_power && !force) - return 0; - - if (!priv->cfg->ops->lib->send_tx_power) - return -EOPNOTSUPP; - - /* 0 dBm mean 1 milliwatt */ - if (tx_power < 0) { - IWL_WARN(priv, - "Requested user TXPOWER %d below 1 mW.\n", - tx_power); - return -EINVAL; - } - - if (tx_power > priv->tx_power_device_lmt) { - IWL_WARN(priv, - "Requested user TXPOWER %d above upper limit %d.\n", - tx_power, priv->tx_power_device_lmt); - return -EINVAL; - } - - if (!iwl_legacy_is_ready_rf(priv)) - return -EIO; - - /* scan complete and commit_rxon use tx_power_next value, - * it always need to be updated for newest request */ - priv->tx_power_next = tx_power; - - /* do not set tx power when scanning or channel changing */ - defer = test_bit(STATUS_SCANNING, &priv->status) || - memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); - if (defer && !force) { - IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); - return 0; - } - - prev_tx_power = priv->tx_power_user_lmt; - priv->tx_power_user_lmt = tx_power; - - ret = priv->cfg->ops->lib->send_tx_power(priv); - - /* if fail to set tx_power, restore the orig. 
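iwl_legacy_set_tx_power above bounds the request between 0 dBm (the comment notes 0 dBm is one milliwatt) and the device limit, and defers the change while a scan or an uncommitted RXON is pending. For reference, the dBm/milliwatt relationship behind that lower bound, as a standalone sketch (not part of this diff; link with -lm):

#include <stdio.h>
#include <math.h>

/* P(mW) = 10^(dBm / 10); 0 dBm is exactly 1 mW. */
static double dbm_to_mw(double dbm)
{
	return pow(10.0, dbm / 10.0);
}

int main(void)
{
	printf("0 dBm = %.1f mW, 14 dBm = %.1f mW\n",
	       dbm_to_mw(0), dbm_to_mw(14));	/* 1.0 mW and about 25.1 mW */
	return 0;
}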
tx power */ - if (ret) { - priv->tx_power_user_lmt = prev_tx_power; - priv->tx_power_next = prev_tx_power; - } - return ret; -} -EXPORT_SYMBOL(iwl_legacy_set_tx_power); - -void iwl_legacy_send_bt_config(struct iwl_priv *priv) -{ - struct iwl_bt_cmd bt_cmd = { - .lead_time = BT_LEAD_TIME_DEF, - .max_kill = BT_MAX_KILL_DEF, - .kill_ack_mask = 0, - .kill_cts_mask = 0, - }; - - if (!bt_coex_active) - bt_cmd.flags = BT_COEX_DISABLE; - else - bt_cmd.flags = BT_COEX_ENABLE; - - IWL_DEBUG_INFO(priv, "BT coex %s\n", - (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); - - if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG, - sizeof(struct iwl_bt_cmd), &bt_cmd)) - IWL_ERR(priv, "failed to send BT Coex Config\n"); -} -EXPORT_SYMBOL(iwl_legacy_send_bt_config); - -int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) -{ - struct iwl_statistics_cmd statistics_cmd = { - .configuration_flags = - clear ? IWL_STATS_CONF_CLEAR_STATS : 0, - }; - - if (flags & CMD_ASYNC) - return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD, - sizeof(struct iwl_statistics_cmd), - &statistics_cmd, NULL); - else - return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, - sizeof(struct iwl_statistics_cmd), - &statistics_cmd); -} -EXPORT_SYMBOL(iwl_legacy_send_statistics_request); - -void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); - IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", - sleep->pm_sleep_mode, sleep->pm_wakeup_src); -#endif -} -EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif); - -void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " - "notification for %s:\n", len, - iwl_legacy_get_cmd_string(pkt->hdr.cmd)); - iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); -} -EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif); - -void iwl_legacy_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " - "seq 0x%04X ser 0x%08X\n", - le32_to_cpu(pkt->u.err_resp.error_type), - iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id), - pkt->u.err_resp.cmd_id, - le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), - le32_to_cpu(pkt->u.err_resp.error_info)); -} -EXPORT_SYMBOL(iwl_legacy_rx_reply_error); - -void iwl_legacy_clear_isr_stats(struct iwl_priv *priv) -{ - memset(&priv->isr_stats, 0, sizeof(priv->isr_stats)); -} - -int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx; - unsigned long flags; - int q; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!iwl_legacy_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return -EIO; - } - - if (queue >= AC_NUM) { - IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); - return 0; - } - - q = AC_NUM - 1 - queue; - - spin_lock_irqsave(&priv->lock, flags); - - for_each_context(priv, ctx) { - ctx->qos_data.def_qos_parm.ac[q].cw_min = - cpu_to_le16(params->cw_min); - ctx->qos_data.def_qos_parm.ac[q].cw_max = - 
cpu_to_le16(params->cw_max); - ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; - ctx->qos_data.def_qos_parm.ac[q].edca_txop = - cpu_to_le16((params->txop * 32)); - - ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; - } - - spin_unlock_irqrestore(&priv->lock, flags); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} -EXPORT_SYMBOL(iwl_legacy_mac_conf_tx); - -int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - return priv->ibss_manager == IWL_IBSS_MANAGER; -} -EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon); - -static int -iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - iwl_legacy_connection_init_rx_config(priv, ctx); - - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - - return iwl_legacy_commit_rxon(priv, ctx); -} - -static int iwl_legacy_setup_interface(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct ieee80211_vif *vif = ctx->vif; - int err; - - lockdep_assert_held(&priv->mutex); - - /* - * This variable will be correct only when there's just - * a single context, but all code using it is for hardware - * that supports only one context. - */ - priv->iw_mode = vif->type; - - ctx->is_active = true; - - err = iwl_legacy_set_mode(priv, ctx); - if (err) { - if (!ctx->always_active) - ctx->is_active = false; - return err; - } - - return 0; -} - -int -iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *tmp, *ctx = NULL; - int err; - - IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", - vif->type, vif->addr); - - mutex_lock(&priv->mutex); - - if (!iwl_legacy_is_ready_rf(priv)) { - IWL_WARN(priv, "Try to add interface when device not ready\n"); - err = -EINVAL; - goto out; - } - - for_each_context(priv, tmp) { - u32 possible_modes = - tmp->interface_modes | tmp->exclusive_interface_modes; - - if (tmp->vif) { - /* check if this busy context is exclusive */ - if (tmp->exclusive_interface_modes & - BIT(tmp->vif->type)) { - err = -EINVAL; - goto out; - } - continue; - } - - if (!(possible_modes & BIT(vif->type))) - continue; - - /* have maybe usable context w/o interface */ - ctx = tmp; - break; - } - - if (!ctx) { - err = -EOPNOTSUPP; - goto out; - } - - vif_priv->ctx = ctx; - ctx->vif = vif; - - err = iwl_legacy_setup_interface(priv, ctx); - if (!err) - goto out; - - ctx->vif = NULL; - priv->iw_mode = NL80211_IFTYPE_STATION; - out: - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return err; -} -EXPORT_SYMBOL(iwl_legacy_mac_add_interface); - -static void iwl_legacy_teardown_interface(struct iwl_priv *priv, - struct ieee80211_vif *vif, - bool mode_change) -{ - struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); - - lockdep_assert_held(&priv->mutex); - - if (priv->scan_vif == vif) { - iwl_legacy_scan_cancel_timeout(priv, 200); - iwl_legacy_force_scan_end(priv); - } - - if (!mode_change) { - iwl_legacy_set_mode(priv, ctx); - if (!ctx->always_active) - ctx->is_active = false; - } -} - -void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->mutex); - - WARN_ON(ctx->vif != vif); - ctx->vif = NULL; - - iwl_legacy_teardown_interface(priv, vif, false); - - 
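iwl_legacy_mac_conf_tx above stores mac80211's per-AC QoS parameters with the queue index reversed (q = AC_NUM - 1 - queue) and converts TXOP from its 802.11 unit of 32 microseconds into microseconds. A standalone sketch of that mapping; the structure and field names are local to the example, not the driver's:

#include <stdio.h>

#define AC_NUM 4

struct edca_param { unsigned short cw_min, cw_max, txop_usec; unsigned char aifsn; };

/* mac80211 numbers queues from highest priority (0 = voice) down; the
 * firmware table here runs the other way, hence the index reversal. */
static void store_ac_params(struct edca_param table[AC_NUM], int mac80211_queue,
			    unsigned short cw_min, unsigned short cw_max,
			    unsigned char aifs, unsigned short txop)
{
	int q = AC_NUM - 1 - mac80211_queue;

	table[q].cw_min = cw_min;
	table[q].cw_max = cw_max;
	table[q].aifsn = aifs;
	table[q].txop_usec = txop * 32;	/* mac80211 TXOP is in 32 usec units */
}

int main(void)
{
	struct edca_param table[AC_NUM] = {{ 0 }};

	store_ac_params(table, 0, 3, 7, 2, 47);	/* voice queue */
	printf("slot %d: txop %u usec\n", AC_NUM - 1,
	       (unsigned int)table[AC_NUM - 1].txop_usec);	/* 1504 usec */
	return 0;
}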
memset(priv->bssid, 0, ETH_ALEN); - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - -} -EXPORT_SYMBOL(iwl_legacy_mac_remove_interface); - -int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv) -{ - if (!priv->txq) - priv->txq = kzalloc( - sizeof(struct iwl_tx_queue) * - priv->cfg->base_params->num_of_queues, - GFP_KERNEL); - if (!priv->txq) { - IWL_ERR(priv, "Not enough memory for txq\n"); - return -ENOMEM; - } - return 0; -} -EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem); - -void iwl_legacy_txq_mem(struct iwl_priv *priv) -{ - kfree(priv->txq); - priv->txq = NULL; -} -EXPORT_SYMBOL(iwl_legacy_txq_mem); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - -#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) - -void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) -{ - priv->tx_traffic_idx = 0; - priv->rx_traffic_idx = 0; - if (priv->tx_traffic) - memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); - if (priv->rx_traffic) - memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); -} - -int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) -{ - u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; - - if (iwlegacy_debug_level & IWL_DL_TX) { - if (!priv->tx_traffic) { - priv->tx_traffic = - kzalloc(traffic_size, GFP_KERNEL); - if (!priv->tx_traffic) - return -ENOMEM; - } - } - if (iwlegacy_debug_level & IWL_DL_RX) { - if (!priv->rx_traffic) { - priv->rx_traffic = - kzalloc(traffic_size, GFP_KERNEL); - if (!priv->rx_traffic) - return -ENOMEM; - } - } - iwl_legacy_reset_traffic_log(priv); - return 0; -} -EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem); - -void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) -{ - kfree(priv->tx_traffic); - priv->tx_traffic = NULL; - - kfree(priv->rx_traffic); - priv->rx_traffic = NULL; -} -EXPORT_SYMBOL(iwl_legacy_free_traffic_mem); - -void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ - __le16 fc; - u16 len; - - if (likely(!(iwlegacy_debug_level & IWL_DL_TX))) - return; - - if (!priv->tx_traffic) - return; - - fc = header->frame_control; - if (ieee80211_is_data(fc)) { - len = (length > IWL_TRAFFIC_ENTRY_SIZE) - ? IWL_TRAFFIC_ENTRY_SIZE : length; - memcpy((priv->tx_traffic + - (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), - header, len); - priv->tx_traffic_idx = - (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; - } -} -EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame); - -void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ - __le16 fc; - u16 len; - - if (likely(!(iwlegacy_debug_level & IWL_DL_RX))) - return; - - if (!priv->rx_traffic) - return; - - fc = header->frame_control; - if (ieee80211_is_data(fc)) { - len = (length > IWL_TRAFFIC_ENTRY_SIZE) - ? 
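The debugfs traffic log above (iwl_legacy_dbg_log_tx_data_frame, with its RX twin continuing just below) copies at most IWL_TRAFFIC_ENTRY_SIZE bytes of each data frame header into a flat buffer and wraps the write index modulo IWL_TRAFFIC_ENTRIES. A minimal standalone sketch of that ring arithmetic, not part of this diff; the sizes are made up for the example:

#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 64	/* bytes kept per frame (example value) */
#define ENTRIES    16	/* ring depth (example value) */

static unsigned char ring[ENTRY_SIZE * ENTRIES];
static unsigned int ring_idx;

/* Copy at most ENTRY_SIZE bytes of the header into the next slot, then wrap. */
static void log_frame(const void *hdr, size_t len)
{
	size_t copy = len > ENTRY_SIZE ? ENTRY_SIZE : len;

	memcpy(ring + ring_idx * ENTRY_SIZE, hdr, copy);
	ring_idx = (ring_idx + 1) % ENTRIES;
}

int main(void)
{
	const char hdr[] = "example 802.11 header bytes";

	for (int i = 0; i < 40; i++)	/* more frames than slots: old entries are overwritten */
		log_frame(hdr, sizeof(hdr));
	printf("next slot: %u\n", ring_idx);	/* 40 % 16 == 8 */
	return 0;
}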
IWL_TRAFFIC_ENTRY_SIZE : length; - memcpy((priv->rx_traffic + - (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), - header, len); - priv->rx_traffic_idx = - (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; - } -} -EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame); - -const char *iwl_legacy_get_mgmt_string(int cmd) -{ - switch (cmd) { - IWL_CMD(MANAGEMENT_ASSOC_REQ); - IWL_CMD(MANAGEMENT_ASSOC_RESP); - IWL_CMD(MANAGEMENT_REASSOC_REQ); - IWL_CMD(MANAGEMENT_REASSOC_RESP); - IWL_CMD(MANAGEMENT_PROBE_REQ); - IWL_CMD(MANAGEMENT_PROBE_RESP); - IWL_CMD(MANAGEMENT_BEACON); - IWL_CMD(MANAGEMENT_ATIM); - IWL_CMD(MANAGEMENT_DISASSOC); - IWL_CMD(MANAGEMENT_AUTH); - IWL_CMD(MANAGEMENT_DEAUTH); - IWL_CMD(MANAGEMENT_ACTION); - default: - return "UNKNOWN"; - - } -} - -const char *iwl_legacy_get_ctrl_string(int cmd) -{ - switch (cmd) { - IWL_CMD(CONTROL_BACK_REQ); - IWL_CMD(CONTROL_BACK); - IWL_CMD(CONTROL_PSPOLL); - IWL_CMD(CONTROL_RTS); - IWL_CMD(CONTROL_CTS); - IWL_CMD(CONTROL_ACK); - IWL_CMD(CONTROL_CFEND); - IWL_CMD(CONTROL_CFENDACK); - default: - return "UNKNOWN"; - - } -} - -void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv) -{ - memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); - memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); -} - -/* - * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined, - * iwl_legacy_update_stats function will - * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass - * Use debugFs to display the rx/rx_statistics - * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL - * information will be recorded, but DATA pkt still will be recorded - * for the reason of iwl_led.c need to control the led blinking based on - * number of tx and rx data. - * - */ -void -iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) -{ - struct traffic_stats *stats; - - if (is_tx) - stats = &priv->tx_stats; - else - stats = &priv->rx_stats; - - if (ieee80211_is_mgmt(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - stats->mgmt[MANAGEMENT_ASSOC_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): - stats->mgmt[MANAGEMENT_ASSOC_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - stats->mgmt[MANAGEMENT_REASSOC_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): - stats->mgmt[MANAGEMENT_REASSOC_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): - stats->mgmt[MANAGEMENT_PROBE_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): - stats->mgmt[MANAGEMENT_PROBE_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_BEACON): - stats->mgmt[MANAGEMENT_BEACON]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ATIM): - stats->mgmt[MANAGEMENT_ATIM]++; - break; - case cpu_to_le16(IEEE80211_STYPE_DISASSOC): - stats->mgmt[MANAGEMENT_DISASSOC]++; - break; - case cpu_to_le16(IEEE80211_STYPE_AUTH): - stats->mgmt[MANAGEMENT_AUTH]++; - break; - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - stats->mgmt[MANAGEMENT_DEAUTH]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ACTION): - stats->mgmt[MANAGEMENT_ACTION]++; - break; - } - } else if (ieee80211_is_ctl(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): - stats->ctrl[CONTROL_BACK_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_BACK): - stats->ctrl[CONTROL_BACK]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PSPOLL): - stats->ctrl[CONTROL_PSPOLL]++; - break; - case cpu_to_le16(IEEE80211_STYPE_RTS): - stats->ctrl[CONTROL_RTS]++; - 
break; - case cpu_to_le16(IEEE80211_STYPE_CTS): - stats->ctrl[CONTROL_CTS]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ACK): - stats->ctrl[CONTROL_ACK]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CFEND): - stats->ctrl[CONTROL_CFEND]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CFENDACK): - stats->ctrl[CONTROL_CFENDACK]++; - break; - } - } else { - /* data */ - stats->data_cnt++; - stats->data_bytes += len; - } -} -EXPORT_SYMBOL(iwl_legacy_update_stats); -#endif - -int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) -{ - struct iwl_force_reset *force_reset; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EINVAL; - - force_reset = &priv->force_reset; - force_reset->reset_request_count++; - if (!external) { - if (force_reset->last_force_reset_jiffies && - time_after(force_reset->last_force_reset_jiffies + - force_reset->reset_duration, jiffies)) { - IWL_DEBUG_INFO(priv, "force reset rejected\n"); - force_reset->reset_reject_count++; - return -EAGAIN; - } - } - force_reset->reset_success_count++; - force_reset->last_force_reset_jiffies = jiffies; - - /* - * if the request is from external(ex: debugfs), - * then always perform the request in regardless the module - * parameter setting - * if the request is from internal (uCode error or driver - * detect failure), then fw_restart module parameter - * need to be check before performing firmware reload - */ - - if (!external && !priv->cfg->mod_params->restart_fw) { - IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " - "module parameter setting\n"); - return 0; - } - - IWL_ERR(priv, "On demand firmware reload\n"); - - /* Set the FW error flag -- cleared on iwl_down */ - set_bit(STATUS_FW_ERROR, &priv->status); - wake_up(&priv->wait_command_queue); - /* - * Keep the restart process from trying to send host - * commands by clearing the INIT status bit - */ - clear_bit(STATUS_READY, &priv->status); - queue_work(priv->workqueue, &priv->restart); - - return 0; -} - -int -iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); - struct iwl_rxon_context *tmp; - u32 interface_modes; - int err; - - newtype = ieee80211_iftype_p2p(newtype, newp2p); - - mutex_lock(&priv->mutex); - - if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) { - /* - * Huh? But wait ... this can maybe happen when - * we're in the middle of a firmware restart! - */ - err = -EBUSY; - goto out; - } - - interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; - - if (!(interface_modes & BIT(newtype))) { - err = -EBUSY; - goto out; - } - - if (ctx->exclusive_interface_modes & BIT(newtype)) { - for_each_context(priv, tmp) { - if (ctx == tmp) - continue; - - if (!tmp->vif) - continue; - - /* - * The current mode switch would be exclusive, but - * another context is active ... refuse the switch. - */ - err = -EBUSY; - goto out; - } - } - - /* success */ - iwl_legacy_teardown_interface(priv, vif, true); - vif->type = newtype; - vif->p2p = newp2p; - err = iwl_legacy_setup_interface(priv, ctx); - WARN_ON(err); - /* - * We've switched internally, but submitting to the - * device may have failed for some reason. Mask this - * error, because otherwise mac80211 will not switch - * (and set the interface type back) and we'll be - * out of sync with it. 
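iwl_legacy_force_reset above rate-limits internally triggered firmware reloads by comparing jiffies against the previous reset time plus reset_duration, while external (debugfs) requests skip the check. A standalone sketch of the same debounce idea, not part of this diff, using a plain monotonic counter in place of jiffies and simplified counters:

#include <stdio.h>

struct force_reset {
	unsigned long last_reset;	/* 0 means "never" */
	unsigned long min_interval;	/* ticks that must pass between internal resets */
	unsigned int request_count, reject_count;
};

/* Return 0 if the reset may proceed, -1 if it is debounced. */
static int try_force_reset(struct force_reset *fr, unsigned long now, int external)
{
	fr->request_count++;

	if (!external && fr->last_reset &&
	    now < fr->last_reset + fr->min_interval) {
		fr->reject_count++;
		return -1;
	}

	fr->last_reset = now;
	return 0;
}

int main(void)
{
	struct force_reset fr = { .min_interval = 100 };

	printf("%d\n", try_force_reset(&fr, 10, 0));	/* 0: first request allowed */
	printf("%d\n", try_force_reset(&fr, 50, 0));	/* -1: too soon, rejected */
	printf("%d\n", try_force_reset(&fr, 60, 1));	/* 0: external bypasses debounce */
	return 0;
}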
- */ - err = 0; - - out: - mutex_unlock(&priv->mutex); - return err; -} -EXPORT_SYMBOL(iwl_legacy_mac_change_interface); - -/* - * On every watchdog tick we check (latest) time stamp. If it does not - * change during timeout period and queue is not empty we reset firmware. - */ -static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt) -{ - struct iwl_tx_queue *txq = &priv->txq[cnt]; - struct iwl_queue *q = &txq->q; - unsigned long timeout; - int ret; - - if (q->read_ptr == q->write_ptr) { - txq->time_stamp = jiffies; - return 0; - } - - timeout = txq->time_stamp + - msecs_to_jiffies(priv->cfg->base_params->wd_timeout); - - if (time_after(jiffies, timeout)) { - IWL_ERR(priv, "Queue %d stuck for %u ms.\n", - q->id, priv->cfg->base_params->wd_timeout); - ret = iwl_legacy_force_reset(priv, false); - return (ret == -EAGAIN) ? 0 : 1; - } - - return 0; -} - -/* - * Making watchdog tick be a quarter of timeout assure we will - * discover the queue hung between timeout and 1.25*timeout - */ -#define IWL_WD_TICK(timeout) ((timeout) / 4) - -/* - * Watchdog timer callback, we check each tx queue for stuck, if if hung - * we reset the firmware. If everything is fine just rearm the timer. - */ -void iwl_legacy_bg_watchdog(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - int cnt; - unsigned long timeout; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - timeout = priv->cfg->base_params->wd_timeout; - if (timeout == 0) - return; - - /* monitor and check for stuck cmd queue */ - if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue)) - return; - - /* monitor and check for other stuck queues */ - if (iwl_legacy_is_any_associated(priv)) { - for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { - /* skip as we already checked the command queue */ - if (cnt == priv->cmd_queue) - continue; - if (iwl_legacy_check_stuck_queue(priv, cnt)) - return; - } - } - - mod_timer(&priv->watchdog, jiffies + - msecs_to_jiffies(IWL_WD_TICK(timeout))); -} -EXPORT_SYMBOL(iwl_legacy_bg_watchdog); - -void iwl_legacy_setup_watchdog(struct iwl_priv *priv) -{ - unsigned int timeout = priv->cfg->base_params->wd_timeout; - - if (timeout) - mod_timer(&priv->watchdog, - jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); - else - del_timer(&priv->watchdog); -} -EXPORT_SYMBOL(iwl_legacy_setup_watchdog); - -/* - * extended beacon time format - * time in usec will be changed into a 32-bit value in extended:internal format - * the extended part is the beacon counts - * the internal part is the time in usec within one beacon interval - */ -u32 -iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, - u32 usec, u32 beacon_interval) -{ - u32 quot; - u32 rem; - u32 interval = beacon_interval * TIME_UNIT; - - if (!interval || !usec) - return 0; - - quot = (usec / interval) & - (iwl_legacy_beacon_time_mask_high(priv, - priv->hw_params.beacon_time_tsf_bits) >> - priv->hw_params.beacon_time_tsf_bits); - rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv, - priv->hw_params.beacon_time_tsf_bits); - - return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; -} -EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons); - -/* base is usually what we get from ucode with each received frame, - * the same as HW timer counter counting down - */ -__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, - u32 addon, u32 beacon_interval) -{ - u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv, - priv->hw_params.beacon_time_tsf_bits); - u32 addon_low = addon & 
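The "extended beacon time format" helpers above pack a microsecond count into one 32-bit word: the high field counts whole beacon intervals and the low field holds the residual microseconds inside the current interval. A simplified standalone sketch of iwl_legacy_usecs_to_beacons follows (the driver additionally masks the beacon count to the high-field width, and the real field width comes from hw_params.beacon_time_tsf_bits; the 22-bit value below is just an example):

#include <stdio.h>
#include <stdint.h>

#define TIME_UNIT 1024	/* one 802.11 time unit in microseconds */

/* Pack usec into (beacon_count << tsf_bits) | usec_within_interval. */
static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu,
				 unsigned int tsf_bits)
{
	uint32_t interval = beacon_interval_tu * TIME_UNIT;
	uint32_t low_mask = (1u << tsf_bits) - 1;

	if (!interval || !usec)
		return 0;

	return ((usec / interval) << tsf_bits) | ((usec % interval) & low_mask);
}

int main(void)
{
	/* 250 ms with a 100 TU beacon interval and a hypothetical 22-bit low field */
	uint32_t v = usecs_to_beacons(250000, 100, 22);

	printf("beacons=%u, remainder=%u usec\n", v >> 22, v & ((1u << 22) - 1));
	return 0;
}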
iwl_legacy_beacon_time_mask_low(priv, - priv->hw_params.beacon_time_tsf_bits); - u32 interval = beacon_interval * TIME_UNIT; - u32 res = (base & iwl_legacy_beacon_time_mask_high(priv, - priv->hw_params.beacon_time_tsf_bits)) + - (addon & iwl_legacy_beacon_time_mask_high(priv, - priv->hw_params.beacon_time_tsf_bits)); - - if (base_low > addon_low) - res += base_low - addon_low; - else if (base_low < addon_low) { - res += interval + base_low - addon_low; - res += (1 << priv->hw_params.beacon_time_tsf_bits); - } else - res += (1 << priv->hw_params.beacon_time_tsf_bits); - - return cpu_to_le32(res); -} -EXPORT_SYMBOL(iwl_legacy_add_beacon_time); - -#ifdef CONFIG_PM - -int iwl_legacy_pci_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_priv *priv = pci_get_drvdata(pdev); - - /* - * This function is called when system goes into suspend state - * mac80211 will call iwl_mac_stop() from the mac80211 suspend function - * first but since iwl_mac_stop() has no knowledge of who the caller is, - * it will not call apm_ops.stop() to stop the DMA operation. - * Calling apm_ops.stop here to make sure we stop the DMA. - */ - iwl_legacy_apm_stop(priv); - - return 0; -} -EXPORT_SYMBOL(iwl_legacy_pci_suspend); - -int iwl_legacy_pci_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_priv *priv = pci_get_drvdata(pdev); - bool hw_rfkill = false; - - /* - * We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state. - */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - - iwl_legacy_enable_interrupts(priv); - - if (!(iwl_read32(priv, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) - hw_rfkill = true; - - if (hw_rfkill) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill); - - return 0; -} -EXPORT_SYMBOL(iwl_legacy_pci_resume); - -const struct dev_pm_ops iwl_legacy_pm_ops = { - .suspend = iwl_legacy_pci_suspend, - .resume = iwl_legacy_pci_resume, - .freeze = iwl_legacy_pci_suspend, - .thaw = iwl_legacy_pci_resume, - .poweroff = iwl_legacy_pci_suspend, - .restore = iwl_legacy_pci_resume, -}; -EXPORT_SYMBOL(iwl_legacy_pm_ops); - -#endif /* CONFIG_PM */ - -static void -iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!ctx->is_active) - return; - - ctx->qos_data.def_qos_parm.qos_flags = 0; - - if (ctx->qos_data.qos_active) - ctx->qos_data.def_qos_parm.qos_flags |= - QOS_PARAM_FLG_UPDATE_EDCA_MSK; - - if (ctx->ht.enabled) - ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; - - IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", - ctx->qos_data.qos_active, - ctx->qos_data.def_qos_parm.qos_flags); - - iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd, - sizeof(struct iwl_qosparam_cmd), - &ctx->qos_data.def_qos_parm, NULL); -} - -/** - * iwl_legacy_mac_config - mac80211 config callback - */ -int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - struct iwl_priv *priv = hw->priv; - const struct iwl_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = conf->channel; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - struct iwl_rxon_context *ctx; - unsigned long flags = 0; - int ret = 0; - u16 ch; - int scan_active = 0; - bool ht_changed[NUM_IWL_RXON_CTX] = {}; - - if 
(WARN_ON(!priv->cfg->ops->legacy)) - return -EOPNOTSUPP; - - mutex_lock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", - channel->hw_value, changed); - - if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { - scan_active = 1; - IWL_DEBUG_MAC80211(priv, "scan active\n"); - } - - if (changed & (IEEE80211_CONF_CHANGE_SMPS | - IEEE80211_CONF_CHANGE_CHANNEL)) { - /* mac80211 uses static for non-HT which is what we want */ - priv->current_ht_config.smps = conf->smps_mode; - - /* - * Recalculate chain counts. - * - * If monitor mode is enabled then mac80211 will - * set up the SM PS mode to OFF if an HT channel is - * configured. - */ - if (priv->cfg->ops->hcmd->set_rxon_chain) - for_each_context(priv, ctx) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - } - - /* during scanning mac80211 will delay channel setting until - * scan finish with changed = 0 - */ - if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { - if (scan_active) - goto set_ch_out; - - ch = channel->hw_value; - ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); - if (!iwl_legacy_is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); - ret = -EINVAL; - goto set_ch_out; - } - - if (priv->iw_mode == NL80211_IFTYPE_ADHOC && - !iwl_legacy_is_channel_ibss(ch_info)) { - IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); - ret = -EINVAL; - goto set_ch_out; - } - - spin_lock_irqsave(&priv->lock, flags); - - for_each_context(priv, ctx) { - /* Configure HT40 channels */ - if (ctx->ht.enabled != conf_is_ht(conf)) { - ctx->ht.enabled = conf_is_ht(conf); - ht_changed[ctx->ctxid] = true; - } - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else - ctx->ht.is_40mhz = false; - - /* - * Default to no protection. 
Protection mode will - * later be set from BSS config in iwl_ht_conf - */ - ctx->ht.protection = - IEEE80211_HT_OP_MODE_PROTECTION_NONE; - - /* if we are switching from ht to 2.4 clear flags - * from any ht related info since 2.4 does not - * support ht */ - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_legacy_set_rxon_channel(priv, channel, ctx); - iwl_legacy_set_rxon_ht(priv, ht_conf); - - iwl_legacy_set_flags_for_band(priv, ctx, channel->band, - ctx->vif); - } - - spin_unlock_irqrestore(&priv->lock, flags); - - if (priv->cfg->ops->legacy->update_bcast_stations) - ret = - priv->cfg->ops->legacy->update_bcast_stations(priv); - - set_ch_out: - /* The list of supported rates and rate mask can be different - * for each band; since the band may have changed, reset - * the rate mask to what mac80211 lists */ - iwl_legacy_set_rate(priv); - } - - if (changed & (IEEE80211_CONF_CHANGE_PS | - IEEE80211_CONF_CHANGE_IDLE)) { - ret = iwl_legacy_power_update_mode(priv, false); - if (ret) - IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); - } - - if (changed & IEEE80211_CONF_CHANGE_POWER) { - IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", - priv->tx_power_user_lmt, conf->power_level); - - iwl_legacy_set_tx_power(priv, conf->power_level, false); - } - - if (!iwl_legacy_is_ready(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - goto out; - } - - if (scan_active) - goto out; - - for_each_context(priv, ctx) { - if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) - iwl_legacy_commit_rxon(priv, ctx); - else - IWL_DEBUG_INFO(priv, - "Not re-sending same RXON configuration.\n"); - if (ht_changed[ctx->ctxid]) - iwl_legacy_update_qos(priv, ctx); - } - -out: - IWL_DEBUG_MAC80211(priv, "leave\n"); - mutex_unlock(&priv->mutex); - return ret; -} -EXPORT_SYMBOL(iwl_legacy_mac_config); - -void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - unsigned long flags; - /* IBSS can only be the IWL_RXON_CTX_BSS context */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (WARN_ON(!priv->cfg->ops->legacy)) - return; - - mutex_lock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "enter\n"); - - spin_lock_irqsave(&priv->lock, flags); - memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); - spin_unlock_irqrestore(&priv->lock, flags); - - spin_lock_irqsave(&priv->lock, flags); - - /* new association get rid of ibss beacon skb */ - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - - priv->beacon_skb = NULL; - - priv->timestamp = 0; - - spin_unlock_irqrestore(&priv->lock, flags); - - iwl_legacy_scan_cancel_timeout(priv, 100); - if (!iwl_legacy_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - mutex_unlock(&priv->mutex); - return; - } - - /* we are restarting association process - * clear RXON_FILTER_ASSOC_MSK bit - */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl_legacy_commit_rxon(priv, ctx); - - iwl_legacy_set_rate(priv); - - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} -EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf); - -static void iwl_legacy_ht_conf(struct iwl_priv *priv, - struct ieee80211_vif *vif) -{ - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - struct ieee80211_sta *sta; - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); - - IWL_DEBUG_ASSOC(priv, "enter:\n"); - - if (!ctx->ht.enabled) - return; - - 
ctx->ht.protection = - bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; - ctx->ht.non_gf_sta_present = - !!(bss_conf->ht_operation_mode & - IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - - ht_conf->single_chain_sufficient = false; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - rcu_read_lock(); - sta = ieee80211_find_sta(vif, bss_conf->bssid); - if (sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - int maxstreams; - - maxstreams = (ht_cap->mcs.tx_params & - IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) - >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; - maxstreams += 1; - - if ((ht_cap->mcs.rx_mask[1] == 0) && - (ht_cap->mcs.rx_mask[2] == 0)) - ht_conf->single_chain_sufficient = true; - if (maxstreams <= 1) - ht_conf->single_chain_sufficient = true; - } else { - /* - * If at all, this can only happen through a race - * when the AP disconnects us while we're still - * setting up the connection, in that case mac80211 - * will soon tell us about that. - */ - ht_conf->single_chain_sufficient = true; - } - rcu_read_unlock(); - break; - case NL80211_IFTYPE_ADHOC: - ht_conf->single_chain_sufficient = true; - break; - default: - break; - } - - IWL_DEBUG_ASSOC(priv, "leave\n"); -} - -static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv, - struct ieee80211_vif *vif) -{ - struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); - - /* - * inform the ucode that there is no longer an - * association and that no more packets should be - * sent - */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ctx->staging.assoc_id = 0; - iwl_legacy_commit_rxon(priv, ctx); -} - -static void iwl_legacy_beacon_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - unsigned long flags; - __le64 timestamp; - struct sk_buff *skb = ieee80211_beacon_get(hw, vif); - - if (!skb) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - lockdep_assert_held(&priv->mutex); - - if (!priv->beacon_ctx) { - IWL_ERR(priv, "update beacon but no beacon context!\n"); - dev_kfree_skb(skb); - return; - } - - spin_lock_irqsave(&priv->lock, flags); - - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - - priv->beacon_skb = skb; - - timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; - priv->timestamp = le64_to_cpu(timestamp); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - spin_unlock_irqrestore(&priv->lock, flags); - - if (!iwl_legacy_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return; - } - - priv->cfg->ops->legacy->post_associate(priv); -} - -void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); - int ret; - - if (WARN_ON(!priv->cfg->ops->legacy)) - return; - - IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); - - mutex_lock(&priv->mutex); - - if (!iwl_legacy_is_alive(priv)) { - mutex_unlock(&priv->mutex); - return; - } - - if (changes & BSS_CHANGED_QOS) { - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - ctx->qos_data.qos_active = bss_conf->qos; - iwl_legacy_update_qos(priv, ctx); - spin_unlock_irqrestore(&priv->lock, flags); - } - - if (changes & BSS_CHANGED_BEACON_ENABLED) { - /* - * the add_interface code must make sure we only ever - * have a single interface that could be beaconing at - * any time. 
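iwl_legacy_ht_conf above decides whether a single RX chain is sufficient by looking at the peer's HT MCS TX parameters (a two-bit "maximum spatial streams minus one" field) and at whether any second- or third-stream rates appear in the RX mask. A standalone sketch of that decode, not part of this diff; the mask and shift values mirror the 802.11n MCS set layout but are defined locally here:

#include <stdio.h>
#include <stdint.h>

/* Bits 2..3 of the MCS tx_params byte: (max TX spatial streams - 1). */
#define HT_MCS_TX_MAX_STREAMS_MASK  0x0c
#define HT_MCS_TX_MAX_STREAMS_SHIFT 2

struct ht_mcs_info { uint8_t rx_mask[10]; uint8_t tx_params; };

static int single_chain_sufficient(const struct ht_mcs_info *mcs)
{
	int maxstreams = ((mcs->tx_params & HT_MCS_TX_MAX_STREAMS_MASK)
			  >> HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;

	/* Peer never sends more than one stream... */
	if (maxstreams <= 1)
		return 1;
	/* ...or never receives MCS 8-23 (second/third stream rates). */
	if (mcs->rx_mask[1] == 0 && mcs->rx_mask[2] == 0)
		return 1;
	return 0;
}

int main(void)
{
	/* tx_params 0x05: TX MCS defined, streams field = 1, i.e. two streams */
	struct ht_mcs_info peer = { .rx_mask = { 0xff, 0xff }, .tx_params = 0x05 };

	printf("%d\n", single_chain_sufficient(&peer));	/* prints 0 */
	return 0;
}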
- */ - if (vif->bss_conf.enable_beacon) - priv->beacon_ctx = ctx; - else - priv->beacon_ctx = NULL; - } - - if (changes & BSS_CHANGED_BSSID) { - IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); - - /* - * If there is currently a HW scan going on in the - * background then we need to cancel it else the RXON - * below/in post_associate will fail. - */ - if (iwl_legacy_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, - "Aborted scan still in progress after 100ms\n"); - IWL_DEBUG_MAC80211(priv, - "leaving - scan abort failed.\n"); - mutex_unlock(&priv->mutex); - return; - } - - /* mac80211 only sets assoc when in STATION mode */ - if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { - memcpy(ctx->staging.bssid_addr, - bss_conf->bssid, ETH_ALEN); - - /* currently needed in a few places */ - memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); - } else { - ctx->staging.filter_flags &= - ~RXON_FILTER_ASSOC_MSK; - } - - } - - /* - * This needs to be after setting the BSSID in case - * mac80211 decides to do both changes at once because - * it will invoke post_associate. - */ - if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) - iwl_legacy_beacon_update(hw, vif); - - if (changes & BSS_CHANGED_ERP_PREAMBLE) { - IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", - bss_conf->use_short_preamble); - if (bss_conf->use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - } - - if (changes & BSS_CHANGED_ERP_CTS_PROT) { - IWL_DEBUG_MAC80211(priv, - "ERP_CTS %d\n", bss_conf->use_cts_prot); - if (bss_conf->use_cts_prot && - (priv->band != IEEE80211_BAND_5GHZ)) - ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; - if (bss_conf->use_cts_prot) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - else - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; - } - - if (changes & BSS_CHANGED_BASIC_RATES) { - /* XXX use this information - * - * To do that, remove code from iwl_legacy_set_rate() and put something - * like this here: - * - if (A-band) - ctx->staging.ofdm_basic_rates = - bss_conf->basic_rates; - else - ctx->staging.ofdm_basic_rates = - bss_conf->basic_rates >> 4; - ctx->staging.cck_basic_rates = - bss_conf->basic_rates & 0xF; - */ - } - - if (changes & BSS_CHANGED_HT) { - iwl_legacy_ht_conf(priv, vif); - - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - } - - if (changes & BSS_CHANGED_ASSOC) { - IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); - if (bss_conf->assoc) { - priv->timestamp = bss_conf->timestamp; - - if (!iwl_legacy_is_rfkill(priv)) - priv->cfg->ops->legacy->post_associate(priv); - } else - iwl_legacy_set_no_assoc(priv, vif); - } - - if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) { - IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", - changes); - ret = iwl_legacy_send_rxon_assoc(priv, ctx); - if (!ret) { - /* Sync active_rxon with latest change. 
*/ - memcpy((void *)&ctx->active, - &ctx->staging, - sizeof(struct iwl_legacy_rxon_cmd)); - } - } - - if (changes & BSS_CHANGED_BEACON_ENABLED) { - if (vif->bss_conf.enable_beacon) { - memcpy(ctx->staging.bssid_addr, - bss_conf->bssid, ETH_ALEN); - memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); - priv->cfg->ops->legacy->config_ap(priv); - } else - iwl_legacy_set_no_assoc(priv, vif); - } - - if (changes & BSS_CHANGED_IBSS) { - ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif, - bss_conf->ibss_joined); - if (ret) - IWL_ERR(priv, "failed to %s IBSS station %pM\n", - bss_conf->ibss_joined ? "add" : "remove", - bss_conf->bssid); - } - - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} -EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed); - -irqreturn_t iwl_legacy_isr(int irq, void *data) -{ - struct iwl_priv *priv = data; - u32 inta, inta_mask; - u32 inta_fh; - unsigned long flags; - if (!priv) - return IRQ_NONE; - - spin_lock_irqsave(&priv->lock, flags); - - /* Disable (but don't clear!) interrupts here to avoid - * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. - * If we *don't* have something, we'll re-enable before leaving here. */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ - iwl_write32(priv, CSR_INT_MASK, 0x00000000); - - /* Discover which interrupts are active/pending */ - inta = iwl_read32(priv, CSR_INT); - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - - /* Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. */ - if (!inta && !inta_fh) { - IWL_DEBUG_ISR(priv, - "Ignore interrupt, inta == 0, inta_fh == 0\n"); - goto none; - } - - if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { - /* Hardware disappeared. It might have already raised - * an interrupt */ - IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); - goto unplugged; - } - - IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", - inta, inta_mask, inta_fh); - - inta &= ~CSR_INT_BIT_SCD; - - /* iwl_irq_tasklet() will service interrupts and re-enable them */ - if (likely(inta || inta_fh)) - tasklet_schedule(&priv->irq_tasklet); - -unplugged: - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_HANDLED; - -none: - /* re-enable interrupts here since we don't have anything to service. */ - /* only Re-enable if disabled by irq */ - if (test_bit(STATUS_INT_ENABLED, &priv->status)) - iwl_legacy_enable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_NONE; -} -EXPORT_SYMBOL(iwl_legacy_isr); - -/* - * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this - * function. 
- */ -void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags) -{ - if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { - *tx_flags |= TX_CMD_FLG_RTS_MSK; - *tx_flags &= ~TX_CMD_FLG_CTS_MSK; - *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; - - if (!ieee80211_is_mgmt(fc)) - return; - - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_AUTH): - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - *tx_flags &= ~TX_CMD_FLG_RTS_MSK; - *tx_flags |= TX_CMD_FLG_CTS_MSK; - break; - } - } else if (info->control.rates[0].flags & - IEEE80211_TX_RC_USE_CTS_PROTECT) { - *tx_flags &= ~TX_CMD_FLG_RTS_MSK; - *tx_flags |= TX_CMD_FLG_CTS_MSK; - *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; - } -} -EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection); diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h deleted file mode 100644 index d1271fe07d4..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-core.h +++ /dev/null @@ -1,636 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_legacy_core_h__ -#define __iwl_legacy_core_h__ - -/************************ - * forward declarations * - ************************/ -struct iwl_host_cmd; -struct iwl_cmd; - - -#define IWLWIFI_VERSION "in-tree:" -#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" -#define DRV_AUTHOR "<ilw@linux.intel.com>" - -#define IWL_PCI_DEVICE(dev, subdev, cfg) \ - .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ - .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ - .driver_data = (kernel_ulong_t)&(cfg) - -#define TIME_UNIT 1024 - -#define IWL_SKU_G 0x1 -#define IWL_SKU_A 0x2 -#define IWL_SKU_N 0x8 - -#define IWL_CMD(x) case x: return #x - -struct iwl_hcmd_ops { - int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); - int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); - void (*set_rxon_chain)(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -}; - -struct iwl_hcmd_utils_ops { - u16 (*get_hcmd_size)(u8 cmd_id, u16 len); - u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd, - u8 *data); - int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); - void (*post_scan)(struct iwl_priv *priv); -}; - -struct iwl_apm_ops { - int (*init)(struct iwl_priv *priv); - void (*config)(struct iwl_priv *priv); -}; - -struct iwl_debugfs_ops { - ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); - ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); - ssize_t (*general_stats_read)(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -}; - -struct iwl_temp_ops { - void (*temperature)(struct iwl_priv *priv); -}; - -struct iwl_lib_ops { - /* set hw dependent parameters */ - int (*set_hw_params)(struct iwl_priv *priv); - /* Handling TX */ - void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - u16 byte_cnt); - int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, - u16 len, u8 reset, u8 pad); - void (*txq_free_tfd)(struct iwl_priv *priv, - struct iwl_tx_queue *txq); - int (*txq_init)(struct iwl_priv *priv, - struct iwl_tx_queue *txq); - /* setup Rx handler */ - void (*rx_handler_setup)(struct iwl_priv *priv); - /* alive notification after init uCode load */ - void (*init_alive_start)(struct iwl_priv *priv); - /* check validity of rtc data address */ - int (*is_valid_rtc_data_addr)(u32 addr); - /* 1st ucode load */ - int (*load_ucode)(struct iwl_priv *priv); - - void (*dump_nic_error_log)(struct iwl_priv *priv); - int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); - int (*set_channel_switch)(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch); - /* power management */ - struct iwl_apm_ops apm_ops; - - /* power */ - int (*send_tx_power) (struct iwl_priv *priv); - void (*update_chain_flags)(struct 
iwl_priv *priv); - - /* eeprom operations (as defined in iwl-eeprom.h) */ - struct iwl_eeprom_ops eeprom_ops; - - /* temperature */ - struct iwl_temp_ops temp_ops; - - struct iwl_debugfs_ops debugfs_ops; - -}; - -struct iwl_led_ops { - int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); -}; - -struct iwl_legacy_ops { - void (*post_associate)(struct iwl_priv *priv); - void (*config_ap)(struct iwl_priv *priv); - /* station management */ - int (*update_bcast_stations)(struct iwl_priv *priv); - int (*manage_ibss_station)(struct iwl_priv *priv, - struct ieee80211_vif *vif, bool add); -}; - -struct iwl_ops { - const struct iwl_lib_ops *lib; - const struct iwl_hcmd_ops *hcmd; - const struct iwl_hcmd_utils_ops *utils; - const struct iwl_led_ops *led; - const struct iwl_nic_ops *nic; - const struct iwl_legacy_ops *legacy; - const struct ieee80211_ops *ieee80211_ops; -}; - -struct iwl_mod_params { - int sw_crypto; /* def: 0 = using hardware encryption */ - int disable_hw_scan; /* def: 0 = use h/w scan */ - int num_of_queues; /* def: HW dependent */ - int disable_11n; /* def: 0 = 11n capabilities enabled */ - int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ - int antenna; /* def: 0 = both antennas (use diversity) */ - int restart_fw; /* def: 1 = restart firmware */ -}; - -/* - * @led_compensation: compensate on the led on/off time per HW according - * to the deviation to achieve the desired led frequency. - * The detail algorithm is described in iwl-led.c - * @chain_noise_num_beacons: number of beacons used to compute chain noise - * @wd_timeout: TX queues watchdog timeout - * @temperature_kelvin: temperature report by uCode in kelvin - * @ucode_tracing: support ucode continuous tracing - * @sensitivity_calib_by_driver: driver has the capability to perform - * sensitivity calibration operation - * @chain_noise_calib_by_driver: driver has the capability to perform - * chain noise calibration operation - */ -struct iwl_base_params { - int eeprom_size; - int num_of_queues; /* def: HW dependent */ - int num_of_ampdu_queues;/* def: HW dependent */ - /* for iwl_legacy_apm_init() */ - u32 pll_cfg_val; - bool set_l0s; - bool use_bsm; - - u16 led_compensation; - int chain_noise_num_beacons; - unsigned int wd_timeout; - bool temperature_kelvin; - const bool ucode_tracing; - const bool sensitivity_calib_by_driver; - const bool chain_noise_calib_by_driver; -}; - -/** - * struct iwl_cfg - * @fw_name_pre: Firmware filename prefix. The api version and extension - * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre<api>.ucode. - * @ucode_api_max: Highest version of uCode API supported by driver. - * @ucode_api_min: Lowest version of uCode API supported by driver. - * @scan_antennas: available antenna for scan operation - * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * - * We enable the driver to be backward compatible wrt API version. The - * driver specifies which APIs it supports (with @ucode_api_max being the - * highest and @ucode_api_min the lowest). Firmware will only be loaded if - * it has a supported API version. The firmware's API version will be - * stored in @iwl_priv, enabling the driver to make runtime changes based - * on firmware version used. - * - * For example, - * if (IWL_UCODE_API(priv->ucode_ver) >= 2) { - * Driver interacts with Firmware API version >= 2. - * } else { - * Driver interacts with Firmware API version 1. 
- * } - * - * The ideal usage of this infrastructure is to treat a new ucode API - * release as a new hardware revision. That is, through utilizing the - * iwl_hcmd_utils_ops etc. we accommodate different command structures - * and flows between hardware versions as well as their API - * versions. - * - */ -struct iwl_cfg { - /* params specific to an individual device within a device family */ - const char *name; - const char *fw_name_pre; - const unsigned int ucode_api_max; - const unsigned int ucode_api_min; - u8 valid_tx_ant; - u8 valid_rx_ant; - unsigned int sku; - u16 eeprom_ver; - u16 eeprom_calib_ver; - const struct iwl_ops *ops; - /* module based parameters which can be set from modprobe cmd */ - const struct iwl_mod_params *mod_params; - /* params not likely to change within a device family */ - struct iwl_base_params *base_params; - /* params likely to change within a device family */ - u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; - enum iwl_led_mode led_mode; -}; - -/*************************** - * L i b * - ***************************/ - -struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg); -int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params); -int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw); -void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - int hw_decrypt); -int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwl_legacy_full_rxon_required(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwl_legacy_set_rxon_channel(struct iwl_priv *priv, - struct ieee80211_channel *ch, - struct iwl_rxon_context *ctx); -void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - enum ieee80211_band band, - struct ieee80211_vif *vif); -u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band); -void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, - struct iwl_ht_config *ht_conf); -bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap); -void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -void iwl_legacy_set_rate(struct iwl_priv *priv); -int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u32 decrypt_res, - struct ieee80211_rx_status *stats); -void iwl_legacy_irq_handle_error(struct iwl_priv *priv); -int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p); -int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv); -void iwl_legacy_txq_mem(struct iwl_priv *priv); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS -int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv); -void iwl_legacy_free_traffic_mem(struct iwl_priv *priv); -void iwl_legacy_reset_traffic_log(struct iwl_priv *priv); -void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header); -void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header); -const char *iwl_legacy_get_mgmt_string(int cmd); -const char *iwl_legacy_get_ctrl_string(int cmd); -void iwl_legacy_clear_traffic_stats(struct iwl_priv 
*priv); -void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, - u16 len); -#else -static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) -{ - return 0; -} -static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) -{ -} -static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) -{ -} -static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ -} -static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ -} -static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, - __le16 fc, u16 len) -{ -} -#endif -/***************************************************** - * RX handlers. - * **************************************************/ -void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_legacy_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - -/***************************************************** -* RX -******************************************************/ -void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv); -void iwl_legacy_cmd_queue_free(struct iwl_priv *priv); -int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv); -void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, - struct iwl_rx_queue *q); -int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q); -void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -/* Handlers */ -void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_legacy_recover_from_statistics(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); -void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success); -void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); - -/* TX helpers */ - -/***************************************************** -* TX -******************************************************/ -void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, - struct iwl_tx_queue *txq); -int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id); -void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - int slots_num, u32 txq_id); -void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id); -void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id); -void iwl_legacy_setup_watchdog(struct iwl_priv *priv); -/***************************************************** - * TX power - ****************************************************/ -int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); - -/******************************************************************************* - * Rate - ******************************************************************************/ - -u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); - -/******************************************************************************* - * Scanning - ******************************************************************************/ -void iwl_legacy_init_scan_params(struct iwl_priv *priv); -int iwl_legacy_scan_cancel(struct iwl_priv *priv); -int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); -void iwl_legacy_force_scan_end(struct iwl_priv 
*priv); -int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req); -void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv); -int iwl_legacy_force_reset(struct iwl_priv *priv, bool external); -u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv, - struct ieee80211_mgmt *frame, - const u8 *ta, const u8 *ie, int ie_len, int left); -void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv); -u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, - u8 n_probes); -u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, - struct ieee80211_vif *vif); -void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv); -void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv); - -/* For faster active scanning, scan will move to the next channel if fewer than - * PLCP_QUIET_THRESH packets are heard on this channel within - * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell - * time if it's a quiet channel (nothing responded to our probe, and there's - * no other traffic). - * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ -#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ -#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ - -#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) - -/***************************************************** - * S e n d i n g H o s t C o m m a n d s * - *****************************************************/ - -const char *iwl_legacy_get_cmd_string(u8 cmd); -int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv, - struct iwl_host_cmd *cmd); -int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); -int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, - u16 len, const void *data); -int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, - const void *data, - void (*callback)(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt)); - -int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); - - -/***************************************************** - * PCI * - *****************************************************/ - -static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv) -{ - int pos; - u16 pci_lnk_ctl; - pos = pci_pcie_cap(priv->pci_dev); - pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); - return pci_lnk_ctl; -} - -void iwl_legacy_bg_watchdog(unsigned long data); -u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, - u32 usec, u32 beacon_interval); -__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, - u32 addon, u32 beacon_interval); - -#ifdef CONFIG_PM -int iwl_legacy_pci_suspend(struct device *device); -int iwl_legacy_pci_resume(struct device *device); -extern const struct dev_pm_ops iwl_legacy_pm_ops; - -#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops) - -#else /* !CONFIG_PM */ - -#define IWL_LEGACY_PM_OPS NULL - -#endif /* !CONFIG_PM */ - -/***************************************************** -* Error Handling Debugging -******************************************************/ -void iwl4965_dump_nic_error_log(struct iwl_priv *priv); -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -#else -static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ -} -#endif - -void iwl_legacy_clear_isr_stats(struct 
iwl_priv *priv); - -/***************************************************** -* GEOS -******************************************************/ -int iwl_legacy_init_geos(struct iwl_priv *priv); -void iwl_legacy_free_geos(struct iwl_priv *priv); - -/*************** DRIVER STATUS FUNCTIONS *****/ - -#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ -/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */ -#define STATUS_INT_ENABLED 2 -#define STATUS_RF_KILL_HW 3 -#define STATUS_CT_KILL 4 -#define STATUS_INIT 5 -#define STATUS_ALIVE 6 -#define STATUS_READY 7 -#define STATUS_TEMPERATURE 8 -#define STATUS_GEO_CONFIGURED 9 -#define STATUS_EXIT_PENDING 10 -#define STATUS_STATISTICS 12 -#define STATUS_SCANNING 13 -#define STATUS_SCAN_ABORTING 14 -#define STATUS_SCAN_HW 15 -#define STATUS_POWER_PMI 16 -#define STATUS_FW_ERROR 17 -#define STATUS_CHANNEL_SWITCH_PENDING 18 - -static inline int iwl_legacy_is_ready(struct iwl_priv *priv) -{ - /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are - * set but EXIT_PENDING is not */ - return test_bit(STATUS_READY, &priv->status) && - test_bit(STATUS_GEO_CONFIGURED, &priv->status) && - !test_bit(STATUS_EXIT_PENDING, &priv->status); -} - -static inline int iwl_legacy_is_alive(struct iwl_priv *priv) -{ - return test_bit(STATUS_ALIVE, &priv->status); -} - -static inline int iwl_legacy_is_init(struct iwl_priv *priv) -{ - return test_bit(STATUS_INIT, &priv->status); -} - -static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv) -{ - return test_bit(STATUS_RF_KILL_HW, &priv->status); -} - -static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv) -{ - return iwl_legacy_is_rfkill_hw(priv); -} - -static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv) -{ - return test_bit(STATUS_CT_KILL, &priv->status); -} - -static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv) -{ - - if (iwl_legacy_is_rfkill(priv)) - return 0; - - return iwl_legacy_is_ready(priv); -} - -extern void iwl_legacy_send_bt_config(struct iwl_priv *priv); -extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv, - u8 flags, bool clear); -void iwl_legacy_apm_stop(struct iwl_priv *priv); -int iwl_legacy_apm_init(struct iwl_priv *priv); - -int iwl_legacy_send_rxon_timing(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx); -} -static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); -} -static inline const struct ieee80211_supported_band *iwl_get_hw_mode( - struct iwl_priv *priv, enum ieee80211_band band) -{ - return priv->hw->wiphy->bands[band]; -} - -/* mac80211 handlers */ -int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); -void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes); -void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags); - -irqreturn_t iwl_legacy_isr(int irq, void *data); - -#endif /* __iwl_legacy_core_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h deleted file mode 100644 index ae13112701b..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-debug.h +++ /dev/null @@ -1,198 
+0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_legacy_debug_h__ -#define __iwl_legacy_debug_h__ - -struct iwl_priv; -extern u32 iwlegacy_debug_level; - -#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a) -#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a) -#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a) -#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a) - -#define iwl_print_hex_error(priv, p, len) \ -do { \ - print_hex_dump(KERN_ERR, "iwl data: ", \ - DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ -} while (0) - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -#define IWL_DEBUG(__priv, level, fmt, args...) \ -do { \ - if (iwl_legacy_get_debug_level(__priv) & (level)) \ - dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ - "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ - __func__ , ## args); \ -} while (0) - -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \ -do { \ - if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \ - dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ - "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ - __func__ , ## args); \ -} while (0) - -#define iwl_print_hex_dump(priv, level, p, len) \ -do { \ - if (iwl_legacy_get_debug_level(priv) & level) \ - print_hex_dump(KERN_DEBUG, "iwl data: ", \ - DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ -} while (0) - -#else -#define IWL_DEBUG(__priv, level, fmt, args...) -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) -static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, - const void *p, u32 len) -{} -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS -int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name); -void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv); -#else -static inline int -iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) -{ - return 0; -} -static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) -{ -} -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ - -/* - * To use the debug system: - * - * If you are defining a new debug classification, simply add it to the #define - * list here in the form of - * - * #define IWL_DL_xxxx VALUE - * - * where xxxx should be the name of the classification (for example, WEP). 
- * - * You then need to either add a IWL_xxxx_DEBUG() macro definition for your - * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want - * to send output to that classification. - * - * The active debug levels can be accessed via files - * - * /sys/module/iwl4965/parameters/debug{50} - * /sys/module/iwl3945/parameters/debug - * /sys/class/net/wlan0/device/debug_level - * - * when CONFIG_IWLWIFI_LEGACY_DEBUG=y. - */ - -/* 0x0000000F - 0x00000001 */ -#define IWL_DL_INFO (1 << 0) -#define IWL_DL_MAC80211 (1 << 1) -#define IWL_DL_HCMD (1 << 2) -#define IWL_DL_STATE (1 << 3) -/* 0x000000F0 - 0x00000010 */ -#define IWL_DL_MACDUMP (1 << 4) -#define IWL_DL_HCMD_DUMP (1 << 5) -#define IWL_DL_EEPROM (1 << 6) -#define IWL_DL_RADIO (1 << 7) -/* 0x00000F00 - 0x00000100 */ -#define IWL_DL_POWER (1 << 8) -#define IWL_DL_TEMP (1 << 9) -#define IWL_DL_NOTIF (1 << 10) -#define IWL_DL_SCAN (1 << 11) -/* 0x0000F000 - 0x00001000 */ -#define IWL_DL_ASSOC (1 << 12) -#define IWL_DL_DROP (1 << 13) -#define IWL_DL_TXPOWER (1 << 14) -#define IWL_DL_AP (1 << 15) -/* 0x000F0000 - 0x00010000 */ -#define IWL_DL_FW (1 << 16) -#define IWL_DL_RF_KILL (1 << 17) -#define IWL_DL_FW_ERRORS (1 << 18) -#define IWL_DL_LED (1 << 19) -/* 0x00F00000 - 0x00100000 */ -#define IWL_DL_RATE (1 << 20) -#define IWL_DL_CALIB (1 << 21) -#define IWL_DL_WEP (1 << 22) -#define IWL_DL_TX (1 << 23) -/* 0x0F000000 - 0x01000000 */ -#define IWL_DL_RX (1 << 24) -#define IWL_DL_ISR (1 << 25) -#define IWL_DL_HT (1 << 26) -#define IWL_DL_IO (1 << 27) -/* 0xF0000000 - 0x10000000 */ -#define IWL_DL_11H (1 << 28) -#define IWL_DL_STATS (1 << 29) -#define IWL_DL_TX_REPLY (1 << 30) -#define IWL_DL_QOS (1 << 31) - -#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) -#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) -#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) -#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) -#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) -#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) -#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) -#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) -#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) -#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) -#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) -#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) -#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) -#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) -#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) -#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) -#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) -#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) -#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) -#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) -#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a) -#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) -#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) -#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a) -#define IWL_DEBUG_ASSOC(p, f, a...) 
\ - IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) -#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) -#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) -#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) -#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) -#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) -#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) -#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) -#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) -#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) -#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) - -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c deleted file mode 100644 index 1407dca70de..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c +++ /dev/null @@ -1,1314 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#include <linux/ieee80211.h> -#include <linux/export.h> -#include <net/mac80211.h> - - -#include "iwl-dev.h" -#include "iwl-debug.h" -#include "iwl-core.h" -#include "iwl-io.h" - -/* create and remove of files */ -#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, priv, \ - &iwl_legacy_dbgfs_##name##_ops)) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -/* file operation */ -#define DEBUGFS_READ_FUNC(name) \ -static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \ - char __user *user_buf, \ - size_t count, loff_t *ppos); - -#define DEBUGFS_WRITE_FUNC(name) \ -static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \ - const char __user *user_buf, \ - size_t count, loff_t *ppos); - - -static int -iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -#define DEBUGFS_READ_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ -static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ - .read = iwl_legacy_dbgfs_##name##_read, \ - .open = iwl_legacy_dbgfs_open_file_generic, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_WRITE_FILE_OPS(name) \ - DEBUGFS_WRITE_FUNC(name); \ -static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ - .write = iwl_legacy_dbgfs_##name##_write, \ - .open = iwl_legacy_dbgfs_open_file_generic, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ - DEBUGFS_WRITE_FUNC(name); \ -static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ - .write = iwl_legacy_dbgfs_##name##_write, \ - .read = iwl_legacy_dbgfs_##name##_read, \ - .open = iwl_legacy_dbgfs_open_file_generic, \ - .llseek = generic_file_llseek, \ -}; - -static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char *buf; - int pos = 0; - - int cnt; - ssize_t ret; - const size_t bufsz = 100 + - sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); - for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - iwl_legacy_get_mgmt_string(cnt), - priv->tx_stats.mgmt[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); - for (cnt = 0; cnt < CONTROL_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - iwl_legacy_get_ctrl_string(cnt), - priv->tx_stats.ctrl[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); - pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", - priv->tx_stats.data_cnt); - pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", - priv->tx_stats.data_bytes); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - 
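Each of the removed iwl-debugfs.c read handlers above follows the same basic debugfs pattern: allocate a scratch buffer, format the state to report with scnprintf(), hand it back to userspace with simple_read_from_buffer(), then free the buffer. The following is a minimal standalone sketch of that pattern only; the example_open/example_read/example_fops names and the registration comment are illustrative and are not part of the iwlegacy sources.

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/slab.h>

    /* Hypothetical illustration of the read pattern used by the removed
     * iwl-debugfs.c handlers; none of these names come from the driver. */
    static int example_open(struct inode *inode, struct file *file)
    {
    	file->private_data = inode->i_private;	/* stash the driver context */
    	return 0;
    }

    static ssize_t example_read(struct file *file, char __user *user_buf,
    			    size_t count, loff_t *ppos)
    {
    	char *buf;
    	int pos = 0;
    	const size_t bufsz = 256;
    	ssize_t ret;

    	buf = kzalloc(bufsz, GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;

    	/* format whatever state should be exposed to userspace */
    	pos += scnprintf(buf + pos, bufsz - pos, "example value: %d\n", 42);

    	/* copy as much as the caller asked for, honouring *ppos */
    	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
    	kfree(buf);
    	return ret;
    }

    static const struct file_operations example_fops = {
    	.read	= example_read,
    	.open	= example_open,
    	.llseek	= generic_file_llseek,
    };

    /* registration would happen from a probe/init path, e.g.:
     *	debugfs_create_file("example", S_IRUSR, parent_dentry, priv, &example_fops);
     */

The write handlers in the same removed file mirror this with copy_from_user() into a small stack buffer followed by sscanf(), as can be seen in the hunks that follow.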
-static ssize_t -iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - u32 clear_flag; - char buf[8]; - int buf_size; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &clear_flag) != 1) - return -EFAULT; - iwl_legacy_clear_traffic_stats(priv); - - return count; -} - -static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char *buf; - int pos = 0; - int cnt; - ssize_t ret; - const size_t bufsz = 100 + - sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); - for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - iwl_legacy_get_mgmt_string(cnt), - priv->rx_stats.mgmt[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); - for (cnt = 0; cnt < CONTROL_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - iwl_legacy_get_ctrl_string(cnt), - priv->rx_stats.ctrl[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); - pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", - priv->rx_stats.data_cnt); - pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", - priv->rx_stats.data_bytes); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -#define BYTE1_MASK 0x000000ff; -#define BYTE2_MASK 0x0000ffff; -#define BYTE3_MASK 0x00ffffff; -static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - u32 val; - char *buf; - ssize_t ret; - int i; - int pos = 0; - struct iwl_priv *priv = file->private_data; - size_t bufsz; - - /* default is to dump the entire data segment */ - if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { - priv->dbgfs_sram_offset = 0x800000; - if (priv->ucode_type == UCODE_INIT) - priv->dbgfs_sram_len = priv->ucode_init_data.len; - else - priv->dbgfs_sram_len = priv->ucode_data.len; - } - bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", - priv->dbgfs_sram_len); - pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", - priv->dbgfs_sram_offset); - for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { - val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \ - priv->dbgfs_sram_len - i); - if (i < 4) { - switch (i) { - case 1: - val &= BYTE1_MASK; - break; - case 2: - val &= BYTE2_MASK; - break; - case 3: - val &= BYTE3_MASK; - break; - } - } - if (!(i % 16)) - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[64]; - int buf_size; - u32 offset, len; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, 
user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x,%x", &offset, &len) == 2) { - priv->dbgfs_sram_offset = offset; - priv->dbgfs_sram_len = len; - } else { - priv->dbgfs_sram_offset = 0; - priv->dbgfs_sram_len = 0; - } - - return count; -} - -static ssize_t -iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct iwl_station_entry *station; - int max_sta = priv->hw_params.max_stations; - char *buf; - int i, j, pos = 0; - ssize_t ret; - /* Add 30 for initial string */ - const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); - - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", - priv->num_stations); - - for (i = 0; i < max_sta; i++) { - station = &priv->stations[i]; - if (!station->used) - continue; - pos += scnprintf(buf + pos, bufsz - pos, - "station %d - addr: %pM, flags: %#x\n", - i, station->sta.sta.addr, - station->sta.station_flags_msk); - pos += scnprintf(buf + pos, bufsz - pos, - "TID\tseq_num\ttxq_id\tframes\ttfds\t"); - pos += scnprintf(buf + pos, bufsz - pos, - "start_idx\tbitmap\t\t\trate_n_flags\n"); - - for (j = 0; j < MAX_TID_COUNT; j++) { - pos += scnprintf(buf + pos, bufsz - pos, - "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", - j, station->tid[j].seq_number, - station->tid[j].agg.txq_id, - station->tid[j].agg.frame_count, - station->tid[j].tfds_in_queue, - station->tid[j].agg.start_idx, - station->tid[j].agg.bitmap, - station->tid[j].agg.rate_n_flags); - - if (station->tid[j].agg.wait_for_ba) - pos += scnprintf(buf + pos, bufsz - pos, - " - waitforba"); - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file, - char __user *user_buf, - size_t count, - loff_t *ppos) -{ - ssize_t ret; - struct iwl_priv *priv = file->private_data; - int pos = 0, ofs = 0, buf_size = 0; - const u8 *ptr; - char *buf; - u16 eeprom_ver; - size_t eeprom_len = priv->cfg->base_params->eeprom_size; - buf_size = 4 * eeprom_len + 256; - - if (eeprom_len % 16) { - IWL_ERR(priv, "NVM size is not multiple of 16.\n"); - return -ENODATA; - } - - ptr = priv->eeprom; - if (!ptr) { - IWL_ERR(priv, "Invalid EEPROM memory\n"); - return -ENOMEM; - } - - /* 4 characters for byte 0xYY */ - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); - pos += scnprintf(buf + pos, buf_size - pos, "EEPROM " - "version: 0x%x\n", eeprom_ver); - for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, - buf_size - pos, 0); - pos += strlen(buf + pos); - if (buf_size - pos > 0) - buf[pos++] = '\n'; - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct ieee80211_channel *channels = NULL; - const struct ieee80211_supported_band *supp_band = NULL; - int pos = 0, i, bufsz = PAGE_SIZE; - char *buf; - ssize_t ret; - - if (!test_bit(STATUS_GEO_CONFIGURED, 
&priv->status)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); - if (supp_band) { - channels = supp_band->channels; - - pos += scnprintf(buf + pos, bufsz - pos, - "Displaying %d channels in 2.4GHz band 802.11bg):\n", - supp_band->n_channels); - - for (i = 0; i < supp_band->n_channels; i++) - pos += scnprintf(buf + pos, bufsz - pos, - "%d: %ddBm: BSS%s%s, %s.\n", - channels[i].hw_value, - channels[i].max_power, - channels[i].flags & IEEE80211_CHAN_RADAR ? - " (IEEE 802.11h required)" : "", - ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) - || (channels[i].flags & - IEEE80211_CHAN_RADAR)) ? "" : - ", IBSS", - channels[i].flags & - IEEE80211_CHAN_PASSIVE_SCAN ? - "passive only" : "active/passive"); - } - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); - if (supp_band) { - channels = supp_band->channels; - - pos += scnprintf(buf + pos, bufsz - pos, - "Displaying %d channels in 5.2GHz band (802.11a)\n", - supp_band->n_channels); - - for (i = 0; i < supp_band->n_channels; i++) - pos += scnprintf(buf + pos, bufsz - pos, - "%d: %ddBm: BSS%s%s, %s.\n", - channels[i].hw_value, - channels[i].max_power, - channels[i].flags & IEEE80211_CHAN_RADAR ? - " (IEEE 802.11h required)" : "", - ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) - || (channels[i].flags & - IEEE80211_CHAN_RADAR)) ? "" : - ", IBSS", - channels[i].flags & - IEEE80211_CHAN_PASSIVE_SCAN ? - "passive only" : "active/passive"); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_status_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char buf[512]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", - test_bit(STATUS_HCMD_ACTIVE, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", - test_bit(STATUS_INT_ENABLED, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", - test_bit(STATUS_RF_KILL_HW, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", - test_bit(STATUS_CT_KILL, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", - test_bit(STATUS_INIT, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", - test_bit(STATUS_ALIVE, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", - test_bit(STATUS_READY, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", - test_bit(STATUS_TEMPERATURE, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", - test_bit(STATUS_GEO_CONFIGURED, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", - test_bit(STATUS_EXIT_PENDING, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", - test_bit(STATUS_STATISTICS, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", - test_bit(STATUS_SCANNING, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", - test_bit(STATUS_SCAN_ABORTING, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", - test_bit(STATUS_SCAN_HW, &priv->status)); - 
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", - test_bit(STATUS_POWER_PMI, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", - test_bit(STATUS_FW_ERROR, &priv->status)); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = 24 * 64; /* 24 items * 64 char per item */ - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - pos += scnprintf(buf + pos, bufsz - pos, - "Interrupt Statistics Report:\n"); - - pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", - priv->isr_stats.hw); - pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", - priv->isr_stats.sw); - if (priv->isr_stats.sw || priv->isr_stats.hw) { - pos += scnprintf(buf + pos, bufsz - pos, - "\tLast Restarting Code: 0x%X\n", - priv->isr_stats.err_code); - } -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", - priv->isr_stats.sch); - pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", - priv->isr_stats.alive); -#endif - pos += scnprintf(buf + pos, bufsz - pos, - "HW RF KILL switch toggled:\t %u\n", - priv->isr_stats.rfkill); - - pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", - priv->isr_stats.ctkill); - - pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", - priv->isr_stats.wakeup); - - pos += scnprintf(buf + pos, bufsz - pos, - "Rx command responses:\t\t %u\n", - priv->isr_stats.rx); - for (cnt = 0; cnt < REPLY_MAX; cnt++) { - if (priv->isr_stats.rx_handlers[cnt] > 0) - pos += scnprintf(buf + pos, bufsz - pos, - "\tRx handler[%36s]:\t\t %u\n", - iwl_legacy_get_cmd_string(cnt), - priv->isr_stats.rx_handlers[cnt]); - } - - pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", - priv->isr_stats.tx); - - pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", - priv->isr_stats.unhandled); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - u32 reset_flag; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &reset_flag) != 1) - return -EFAULT; - if (reset_flag == 0) - iwl_legacy_clear_isr_stats(priv); - - return count; -} - -static ssize_t -iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct iwl_rxon_context *ctx; - int pos = 0, i; - char buf[256 * NUM_IWL_RXON_CTX]; - const size_t bufsz = sizeof(buf); - - for_each_context(priv, ctx) { - pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", - ctx->ctxid); - for (i = 0; i < AC_NUM; i++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\tcw_min\tcw_max\taifsn\ttxop\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "AC[%d]\t%u\t%u\t%u\t%u\n", i, - ctx->qos_data.def_qos_parm.ac[i].cw_min, - ctx->qos_data.def_qos_parm.ac[i].cw_max, - ctx->qos_data.def_qos_parm.ac[i].aifsn, - 
ctx->qos_data.def_qos_parm.ac[i].edca_txop); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int ht40; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &ht40) != 1) - return -EFAULT; - if (!iwl_legacy_is_any_associated(priv)) - priv->disable_ht40 = ht40 ? true : false; - else { - IWL_ERR(priv, "Sta associated with AP - " - "Change to 40MHz channel support is not allowed\n"); - return -EINVAL; - } - - return count; -} - -static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[100]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, - "11n 40MHz Mode: %s\n", - priv->disable_ht40 ? "Disabled" : "Enabled"); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -DEBUGFS_READ_WRITE_FILE_OPS(sram); -DEBUGFS_READ_FILE_OPS(nvm); -DEBUGFS_READ_FILE_OPS(stations); -DEBUGFS_READ_FILE_OPS(channels); -DEBUGFS_READ_FILE_OPS(status); -DEBUGFS_READ_WRITE_FILE_OPS(interrupt); -DEBUGFS_READ_FILE_OPS(qos); -DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); - -static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0, ofs = 0; - int cnt = 0, entry; - struct iwl_tx_queue *txq; - struct iwl_queue *q; - struct iwl_rx_queue *rxq = &priv->rxq; - char *buf; - int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + - (priv->cfg->base_params->num_of_queues * 32 * 8) + 400; - const u8 *ptr; - ssize_t ret; - - if (!priv->txq) { - IWL_ERR(priv, "txq not ready\n"); - return -EAGAIN; - } - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate buffer\n"); - return -ENOMEM; - } - pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); - for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { - txq = &priv->txq[cnt]; - q = &txq->q; - pos += scnprintf(buf + pos, bufsz - pos, - "q[%d]: read_ptr: %u, write_ptr: %u\n", - cnt, q->read_ptr, q->write_ptr); - } - if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) { - ptr = priv->tx_traffic; - pos += scnprintf(buf + pos, bufsz - pos, - "Tx Traffic idx: %u\n", priv->tx_traffic_idx); - for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { - for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; - entry++, ofs += 16) { - pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, - buf + pos, bufsz - pos, 0); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; - } - } - } - - pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "read: %u, write: %u\n", - rxq->read, rxq->write); - - if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) { - ptr = priv->rx_traffic; - pos += scnprintf(buf + pos, bufsz - pos, - "Rx Traffic idx: %u\n", priv->rx_traffic_idx); - for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { - for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; - entry++, ofs += 16) { - pos += 
scnprintf(buf + pos, bufsz - pos, - "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, - buf + pos, bufsz - pos, 0); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; - } - } - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int traffic_log; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &traffic_log) != 1) - return -EFAULT; - if (traffic_log == 0) - iwl_legacy_reset_traffic_log(priv); - - return count; -} - -static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - struct iwl_tx_queue *txq; - struct iwl_queue *q; - char *buf; - int pos = 0; - int cnt; - int ret; - const size_t bufsz = sizeof(char) * 64 * - priv->cfg->base_params->num_of_queues; - - if (!priv->txq) { - IWL_ERR(priv, "txq not ready\n"); - return -EAGAIN; - } - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { - txq = &priv->txq[cnt]; - q = &txq->q; - pos += scnprintf(buf + pos, bufsz - pos, - "hwq %.2d: read=%u write=%u stop=%d" - " swq_id=%#.2x (ac %d/hwq %d)\n", - cnt, q->read_ptr, q->write_ptr, - !!test_bit(cnt, priv->queue_stopped), - txq->swq_id, txq->swq_id & 3, - (txq->swq_id >> 2) & 0x1f); - if (cnt >= 4) - continue; - /* for the ACs, display the stop count too */ - pos += scnprintf(buf + pos, bufsz - pos, - " stop-count: %d\n", - atomic_read(&priv->queue_stop_count[cnt])); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - struct iwl_rx_queue *rxq = &priv->rxq; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", - rxq->read); - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", - rxq->write); - pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", - rxq->free_count); - if (rxq->rb_stts) { - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "closed_rb_num: Not Allocated\n"); - } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file, - user_buf, count, ppos); -} - -static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file, - user_buf, count, ppos); -} - -static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - return 
priv->cfg->ops->lib->debugfs_ops.general_stats_read(file, - user_buf, count, ppos); -} - -static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; - ssize_t ret; - struct iwl_sensitivity_data *data; - - data = &priv->sensitivity_data; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); - return -ENOMEM; - } - - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", - data->auto_corr_ofdm); - pos += scnprintf(buf + pos, bufsz - pos, - "auto_corr_ofdm_mrc:\t\t %u\n", - data->auto_corr_ofdm_mrc); - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", - data->auto_corr_ofdm_x1); - pos += scnprintf(buf + pos, bufsz - pos, - "auto_corr_ofdm_mrc_x1:\t\t %u\n", - data->auto_corr_ofdm_mrc_x1); - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", - data->auto_corr_cck); - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", - data->auto_corr_cck_mrc); - pos += scnprintf(buf + pos, bufsz - pos, - "last_bad_plcp_cnt_ofdm:\t\t %u\n", - data->last_bad_plcp_cnt_ofdm); - pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", - data->last_fa_cnt_ofdm); - pos += scnprintf(buf + pos, bufsz - pos, - "last_bad_plcp_cnt_cck:\t\t %u\n", - data->last_bad_plcp_cnt_cck); - pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", - data->last_fa_cnt_cck); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", - data->nrg_curr_state); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", - data->nrg_prev_state); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); - for (cnt = 0; cnt < 10; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->nrg_value[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); - for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->nrg_silence_rssi[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", - data->nrg_silence_ref); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", - data->nrg_energy_idx); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", - data->nrg_silence_idx); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", - data->nrg_th_cck); - pos += scnprintf(buf + pos, bufsz - pos, - "nrg_auto_corr_silence_diff:\t %u\n", - data->nrg_auto_corr_silence_diff); - pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", - data->num_in_cck_no_fa); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", - data->nrg_th_ofdm); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - - -static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; - ssize_t ret; - struct iwl_chain_noise_data *data; - - data = &priv->chain_noise_data; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate 
Buffer\n"); - return -ENOMEM; - } - - pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", - data->active_chains); - pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", - data->chain_noise_a); - pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", - data->chain_noise_b); - pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", - data->chain_noise_c); - pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", - data->chain_signal_a); - pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", - data->chain_signal_b); - pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", - data->chain_signal_c); - pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", - data->beacon_count); - - pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); - for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->disconn_array[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); - for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->delta_gain_code[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", - data->radio_write); - pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", - data->state); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[60]; - int pos = 0; - const size_t bufsz = sizeof(buf); - u32 pwrsave_status; - - pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) & - CSR_GP_REG_POWER_SAVE_STATUS_MSK; - - pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); - pos += scnprintf(buf + pos, bufsz - pos, "%s\n", - (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : - (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : - (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? 
"PHY" : - "error"); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int clear; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &clear) != 1) - return -EFAULT; - - /* make request to uCode to retrieve statistics information */ - mutex_lock(&priv->mutex); - iwl_legacy_send_statistics_request(priv, CMD_SYNC, true); - mutex_unlock(&priv->mutex); - - return count; -} - -static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int len = 0; - char buf[20]; - - len = sprintf(buf, "0x%04X\n", - le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int len = 0; - char buf[20]; - - len = sprintf(buf, "0x%04X\n", - le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char *buf; - int pos = 0; - ssize_t ret = -EFAULT; - - if (priv->cfg->ops->lib->dump_fh) { - ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true); - if (buf) { - ret = simple_read_from_buffer(user_buf, - count, ppos, buf, pos); - kfree(buf); - } - } - - return ret; -} - -static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[12]; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "%d\n", - priv->missed_beacon_threshold); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int missed; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &missed) != 1) - return -EINVAL; - - if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || - missed > IWL_MISSED_BEACON_THRESHOLD_MAX) - priv->missed_beacon_threshold = - IWL_MISSED_BEACON_THRESHOLD_DEF; - else - priv->missed_beacon_threshold = missed; - - return count; -} - -static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[300]; - const size_t bufsz = sizeof(buf); - struct iwl_force_reset *force_reset; - - force_reset = &priv->force_reset; - - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request: %d\n", - force_reset->reset_request_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request success: %d\n", - 
force_reset->reset_success_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request reject: %d\n", - force_reset->reset_reject_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\treset duration: %lu\n", - force_reset->reset_duration); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) { - - int ret; - struct iwl_priv *priv = file->private_data; - - ret = iwl_legacy_force_reset(priv, true); - - return ret ? ret : count; -} - -static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int timeout; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &timeout) != 1) - return -EINVAL; - if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) - timeout = IWL_DEF_WD_TIMEOUT; - - priv->cfg->base_params->wd_timeout = timeout; - iwl_legacy_setup_watchdog(priv); - return count; -} - -DEBUGFS_READ_FILE_OPS(rx_statistics); -DEBUGFS_READ_FILE_OPS(tx_statistics); -DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); -DEBUGFS_READ_FILE_OPS(rx_queue); -DEBUGFS_READ_FILE_OPS(tx_queue); -DEBUGFS_READ_FILE_OPS(ucode_rx_stats); -DEBUGFS_READ_FILE_OPS(ucode_tx_stats); -DEBUGFS_READ_FILE_OPS(ucode_general_stats); -DEBUGFS_READ_FILE_OPS(sensitivity); -DEBUGFS_READ_FILE_OPS(chain_noise); -DEBUGFS_READ_FILE_OPS(power_save_status); -DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); -DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); -DEBUGFS_READ_FILE_OPS(fh_reg); -DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); -DEBUGFS_READ_WRITE_FILE_OPS(force_reset); -DEBUGFS_READ_FILE_OPS(rxon_flags); -DEBUGFS_READ_FILE_OPS(rxon_filter_flags); -DEBUGFS_WRITE_FILE_OPS(wd_timeout); - -/* - * Create the debugfs files and directories - * - */ -int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) -{ - struct dentry *phyd = priv->hw->wiphy->debugfsdir; - struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; - - dir_drv = debugfs_create_dir(name, phyd); - if (!dir_drv) - return -ENOMEM; - - priv->debugfs_dir = dir_drv; - - dir_data = debugfs_create_dir("data", dir_drv); - if (!dir_data) - goto err; - dir_rf = debugfs_create_dir("rf", dir_drv); - if (!dir_rf) - goto err; - dir_debug = debugfs_create_dir("debug", dir_drv); - if (!dir_debug) - goto err; - - DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(missed_beacon, dir_debug, 
S_IWUSR); - DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); - - if (priv->cfg->base_params->sensitivity_calib_by_driver) - DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); - if (priv->cfg->base_params->chain_noise_calib_by_driver) - DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); - if (priv->cfg->base_params->sensitivity_calib_by_driver) - DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, - &priv->disable_sens_cal); - if (priv->cfg->base_params->chain_noise_calib_by_driver) - DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, - &priv->disable_chain_noise_cal); - DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, - &priv->disable_tx_power_cal); - return 0; - -err: - IWL_ERR(priv, "Can't create the debugfs directory\n"); - iwl_legacy_dbgfs_unregister(priv); - return -ENOMEM; -} -EXPORT_SYMBOL(iwl_legacy_dbgfs_register); - -/** - * Remove the debugfs files and directories - * - */ -void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) -{ - if (!priv->debugfs_dir) - return; - - debugfs_remove_recursive(priv->debugfs_dir); - priv->debugfs_dir = NULL; -} -EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister); diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h deleted file mode 100644 index 9c786edf56f..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-dev.h +++ /dev/null @@ -1,1364 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -/* - * Please use this file (iwl-dev.h) for driver implementation definitions. - * Please use iwl-commands.h for uCode API definitions. - * Please use iwl-4965-hw.h for hardware-related definitions. 
- */ - -#ifndef __iwl_legacy_dev_h__ -#define __iwl_legacy_dev_h__ - -#include <linux/interrupt.h> -#include <linux/pci.h> /* for struct pci_device_id */ -#include <linux/kernel.h> -#include <linux/leds.h> -#include <linux/wait.h> -#include <net/ieee80211_radiotap.h> - -#include "iwl-eeprom.h" -#include "iwl-csr.h" -#include "iwl-prph.h" -#include "iwl-fh.h" -#include "iwl-debug.h" -#include "iwl-4965-hw.h" -#include "iwl-3945-hw.h" -#include "iwl-led.h" -#include "iwl-power.h" -#include "iwl-legacy-rs.h" - -struct iwl_tx_queue; - -/* CT-KILL constants */ -#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ - -/* Default noise level to report when noise measurement is not available. - * This may be because we're: - * 1) Not associated (4965, no beacon statistics being sent to driver) - * 2) Scanning (noise measurement does not apply to associated channel) - * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) - * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. - * Also, -127 works better than 0 when averaging frames with/without - * noise info (e.g. averaging might be done in app); measured dBm values are - * always negative ... using a negative value as the default keeps all - * averages within an s8's (used in some apps) range of negative values. */ -#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) - -/* - * RTS threshold here is total size [2347] minus 4 FCS bytes - * Per spec: - * a value of 0 means RTS on all data/management packets - * a value > max MSDU size means no RTS - * else RTS for data/management frames where MPDU is larger - * than RTS value. - */ -#define DEFAULT_RTS_THRESHOLD 2347U -#define MIN_RTS_THRESHOLD 0U -#define MAX_RTS_THRESHOLD 2347U -#define MAX_MSDU_SIZE 2304U -#define MAX_MPDU_SIZE 2346U -#define DEFAULT_BEACON_INTERVAL 100U -#define DEFAULT_SHORT_RETRY_LIMIT 7U -#define DEFAULT_LONG_RETRY_LIMIT 4U - -struct iwl_rx_mem_buffer { - dma_addr_t page_dma; - struct page *page; - struct list_head list; -}; - -#define rxb_addr(r) page_address(r->page) - -/* defined below */ -struct iwl_device_cmd; - -struct iwl_cmd_meta { - /* only for SYNC commands, iff the reply skb is wanted */ - struct iwl_host_cmd *source; - /* - * only for ASYNC commands - * (which is somewhat stupid -- look at iwl-sta.c for instance - * which duplicates a bunch of code because the callback isn't - * invoked for SYNC commands, if it were and its result passed - * through it would be simpler...) - */ - void (*callback)(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt); - - /* The CMD_SIZE_HUGE flag bit indicates that the command - * structure is stored at the end of the shared queue memory. 
*/ - u32 flags; - - DEFINE_DMA_UNMAP_ADDR(mapping); - DEFINE_DMA_UNMAP_LEN(len); -}; - -/* - * Generic queue structure - * - * Contains common data for Rx and Tx queues - */ -struct iwl_queue { - int n_bd; /* number of BDs in this queue */ - int write_ptr; /* 1-st empty entry (index) host_w*/ - int read_ptr; /* last used entry (index) host_r*/ - /* use for monitoring and recovering the stuck queue */ - dma_addr_t dma_addr; /* physical addr for BD's */ - int n_window; /* safe queue window */ - u32 id; - int low_mark; /* low watermark, resume queue if free - * space more than this */ - int high_mark; /* high watermark, stop queue if free - * space less than this */ -}; - -/* One for each TFD */ -struct iwl_tx_info { - struct sk_buff *skb; - struct iwl_rxon_context *ctx; -}; - -/** - * struct iwl_tx_queue - Tx Queue for DMA - * @q: generic Rx/Tx queue descriptor - * @bd: base of circular buffer of TFDs - * @cmd: array of command/TX buffer pointers - * @meta: array of meta data for each command/tx buffer - * @dma_addr_cmd: physical address of cmd/tx buffer array - * @txb: array of per-TFD driver data - * @time_stamp: time (in jiffies) of last read_ptr change - * @need_update: indicates need to update read/write index - * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled - * - * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame - * descriptors) and required locking structures. - */ -#define TFD_TX_CMD_SLOTS 256 -#define TFD_CMD_SLOTS 32 - -struct iwl_tx_queue { - struct iwl_queue q; - void *tfds; - struct iwl_device_cmd **cmd; - struct iwl_cmd_meta *meta; - struct iwl_tx_info *txb; - unsigned long time_stamp; - u8 need_update; - u8 sched_retry; - u8 active; - u8 swq_id; -}; - -#define IWL_NUM_SCAN_RATES (2) - -struct iwl4965_channel_tgd_info { - u8 type; - s8 max_power; -}; - -struct iwl4965_channel_tgh_info { - s64 last_radar_time; -}; - -#define IWL4965_MAX_RATE (33) - -struct iwl3945_clip_group { - /* maximum power level to prevent clipping for each rate, derived by - * us from this band's saturation power in EEPROM */ - const s8 clip_powers[IWL_MAX_RATES]; -}; - -/* current Tx power values to use, one for each rate for each channel. - * requested power is limited by: - * -- regulatory EEPROM limits for this channel - * -- hardware capabilities (clip-powers) - * -- spectrum management - * -- user preference (e.g. iwconfig) - * when requested power is set, base power index must also be set. */ -struct iwl3945_channel_power_info { - struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ - s8 power_table_index; /* actual (compenst'd) index into gain table */ - s8 base_power_index; /* gain index for power at factory temp. */ - s8 requested_power; /* power (dBm) requested for this chnl/rate */ -}; - -/* current scan Tx power values to use, one for each scan rate for each - * channel. */ -struct iwl3945_scan_power_info { - struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ - s8 power_table_index; /* actual (compenst'd) index into gain table */ - s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ -}; - -/* - * One for each channel, holds all channel setup data - * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant - * with one another! 
- */ -struct iwl_channel_info { - struct iwl4965_channel_tgd_info tgd; - struct iwl4965_channel_tgh_info tgh; - struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ - struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for - * HT40 channel */ - - u8 channel; /* channel number */ - u8 flags; /* flags copied from EEPROM */ - s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ - s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ - s8 min_power; /* always 0 */ - s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ - - u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ - u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ - enum ieee80211_band band; - - /* HT40 channel info */ - s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ - u8 ht40_flags; /* flags copied from EEPROM */ - u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ - - /* Radio/DSP gain settings for each "normal" data Tx rate. - * These include, in addition to RF and DSP gain, a few fields for - * remembering/modifying gain settings (indexes). */ - struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE]; - - /* Radio/DSP gain settings for each scan rate, for directed scans. */ - struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; -}; - -#define IWL_TX_FIFO_BK 0 /* shared */ -#define IWL_TX_FIFO_BE 1 -#define IWL_TX_FIFO_VI 2 /* shared */ -#define IWL_TX_FIFO_VO 3 -#define IWL_TX_FIFO_UNUSED -1 - -/* Minimum number of queues. MAX_NUM is defined in hw specific files. - * Set the minimum to accommodate the 4 standard TX queues, 1 command - * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ -#define IWL_MIN_NUM_QUEUES 10 - -#define IWL_DEFAULT_CMD_QUEUE_NUM 4 - -#define IEEE80211_DATA_LEN 2304 -#define IEEE80211_4ADDR_LEN 30 -#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -struct iwl_frame { - union { - struct ieee80211_hdr frame; - struct iwl_tx_beacon_cmd beacon; - u8 raw[IEEE80211_FRAME_LEN]; - u8 cmd[360]; - } u; - struct list_head list; -}; - -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) - -enum { - CMD_SYNC = 0, - CMD_SIZE_NORMAL = 0, - CMD_NO_SKB = 0, - CMD_SIZE_HUGE = (1 << 0), - CMD_ASYNC = (1 << 1), - CMD_WANT_SKB = (1 << 2), - CMD_MAPPED = (1 << 3), -}; - -#define DEF_CMD_PAYLOAD_SIZE 320 - -/** - * struct iwl_device_cmd - * - * For allocation of the command and tx queues, this establishes the overall - * size of the largest command we send to uCode, except for a scan command - * (which is relatively huge; space is allocated separately). 
- */ -struct iwl_device_cmd { - struct iwl_cmd_header hdr; /* uCode API */ - union { - u32 flags; - u8 val8; - u16 val16; - u32 val32; - struct iwl_tx_cmd tx; - u8 payload[DEF_CMD_PAYLOAD_SIZE]; - } __packed cmd; -} __packed; - -#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) - - -struct iwl_host_cmd { - const void *data; - unsigned long reply_page; - void (*callback)(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt); - u32 flags; - u16 len; - u8 id; -}; - -#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 -#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 -#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 - -/** - * struct iwl_rx_queue - Rx queue - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) - * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @read: Shared index to newest available Rx buffer - * @write: Shared index to oldest written Rx packet - * @free_count: Number of pre-allocated buffers in rx_free - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB - * @need_update: flag to indicate we need to update read/write index - * @rb_stts: driver's pointer to receive buffer status - * @rb_stts_dma: bus address of receive buffer status - * - * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers - */ -struct iwl_rx_queue { - __le32 *bd; - dma_addr_t bd_dma; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; - u32 read; - u32 write; - u32 free_count; - u32 write_actual; - struct list_head rx_free; - struct list_head rx_used; - int need_update; - struct iwl_rb_status *rb_stts; - dma_addr_t rb_stts_dma; - spinlock_t lock; -}; - -#define IWL_SUPPORTED_RATES_IE_LEN 8 - -#define MAX_TID_COUNT 9 - -#define IWL_INVALID_RATE 0xFF -#define IWL_INVALID_VALUE -1 - -/** - * struct iwl_ht_agg -- aggregation status while waiting for block-ack - * @txq_id: Tx queue used for Tx attempt - * @frame_count: # frames attempted by Tx command - * @wait_for_ba: Expect block-ack before next Tx reply - * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window - * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window - * @bitmap1: High order, one bit for each frame pending ACK in Tx window - * @rate_n_flags: Rate at which Tx was attempted - * - * If REPLY_TX indicates that aggregation was attempted, driver must wait - * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info - * until block ack arrives. 
- */ -struct iwl_ht_agg { - u16 txq_id; - u16 frame_count; - u16 wait_for_ba; - u16 start_idx; - u64 bitmap; - u32 rate_n_flags; -#define IWL_AGG_OFF 0 -#define IWL_AGG_ON 1 -#define IWL_EMPTYING_HW_QUEUE_ADDBA 2 -#define IWL_EMPTYING_HW_QUEUE_DELBA 3 - u8 state; -}; - - -struct iwl_tid_data { - u16 seq_number; /* 4965 only */ - u16 tfds_in_queue; - struct iwl_ht_agg agg; -}; - -struct iwl_hw_key { - u32 cipher; - int keylen; - u8 keyidx; - u8 key[32]; -}; - -union iwl_ht_rate_supp { - u16 rates; - struct { - u8 siso_rate; - u8 mimo_rate; - }; -}; - -#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0) -#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1) -#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2) -#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3) -#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K -#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K -#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K - -/* - * Maximal MPDU density for TX aggregation - * 4 - 2us density - * 5 - 4us density - * 6 - 8us density - * 7 - 16us density - */ -#define CFG_HT_MPDU_DENSITY_2USEC (0x4) -#define CFG_HT_MPDU_DENSITY_4USEC (0x5) -#define CFG_HT_MPDU_DENSITY_8USEC (0x6) -#define CFG_HT_MPDU_DENSITY_16USEC (0x7) -#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC -#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC -#define CFG_HT_MPDU_DENSITY_MIN (0x1) - -struct iwl_ht_config { - bool single_chain_sufficient; - enum ieee80211_smps_mode smps; /* current smps mode */ -}; - -/* QoS structures */ -struct iwl_qos_info { - int qos_active; - struct iwl_qosparam_cmd def_qos_parm; -}; - -/* - * Structure should be accessed with sta_lock held. When station addition - * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only - * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without - * sta_lock held. - */ -struct iwl_station_entry { - struct iwl_legacy_addsta_cmd sta; - struct iwl_tid_data tid[MAX_TID_COUNT]; - u8 used, ctxid; - struct iwl_hw_key keyinfo; - struct iwl_link_quality_cmd *lq; -}; - -struct iwl_station_priv_common { - struct iwl_rxon_context *ctx; - u8 sta_id; -}; - -/* - * iwl_station_priv: Driver's private station information - * - * When mac80211 creates a station it reserves some space (hw->sta_data_size) - * in the structure for use by driver. This structure is places in that - * space. - * - * The common struct MUST be first because it is shared between - * 3945 and 4965! - */ -struct iwl_station_priv { - struct iwl_station_priv_common common; - struct iwl_lq_sta lq_sta; - atomic_t pending_frames; - bool client; - bool asleep; -}; - -/** - * struct iwl_vif_priv - driver's private per-interface information - * - * When mac80211 allocates a virtual interface, it can allocate - * space for us to put data into. 
- */ -struct iwl_vif_priv { - struct iwl_rxon_context *ctx; - u8 ibss_bssid_sta_id; -}; - -/* one for each uCode image (inst/data, boot/init/runtime) */ -struct fw_desc { - void *v_addr; /* access by driver */ - dma_addr_t p_addr; /* access by card's busmaster DMA */ - u32 len; /* bytes */ -}; - -/* uCode file layout */ -struct iwl_ucode_header { - __le32 ver; /* major/minor/API/serial */ - struct { - __le32 inst_size; /* bytes of runtime code */ - __le32 data_size; /* bytes of runtime data */ - __le32 init_size; /* bytes of init code */ - __le32 init_data_size; /* bytes of init data */ - __le32 boot_size; /* bytes of bootstrap code */ - u8 data[0]; /* in same order as sizes */ - } v1; -}; - -struct iwl4965_ibss_seq { - u8 mac[ETH_ALEN]; - u16 seq_num; - u16 frag_num; - unsigned long packet_time; - struct list_head list; -}; - -struct iwl_sensitivity_ranges { - u16 min_nrg_cck; - u16 max_nrg_cck; - - u16 nrg_th_cck; - u16 nrg_th_ofdm; - - u16 auto_corr_min_ofdm; - u16 auto_corr_min_ofdm_mrc; - u16 auto_corr_min_ofdm_x1; - u16 auto_corr_min_ofdm_mrc_x1; - - u16 auto_corr_max_ofdm; - u16 auto_corr_max_ofdm_mrc; - u16 auto_corr_max_ofdm_x1; - u16 auto_corr_max_ofdm_mrc_x1; - - u16 auto_corr_max_cck; - u16 auto_corr_max_cck_mrc; - u16 auto_corr_min_cck; - u16 auto_corr_min_cck_mrc; - - u16 barker_corr_th_min; - u16 barker_corr_th_min_mrc; - u16 nrg_th_cca; -}; - - -#define KELVIN_TO_CELSIUS(x) ((x)-273) -#define CELSIUS_TO_KELVIN(x) ((x)+273) - - -/** - * struct iwl_hw_params - * @max_txq_num: Max # Tx queues supported - * @dma_chnl_num: Number of Tx DMA/FIFO channels - * @scd_bc_tbls_size: size of scheduler byte count tables - * @tfd_size: TFD size - * @tx/rx_chains_num: Number of TX/RX chains - * @valid_tx/rx_ant: usable antennas - * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) - * @max_rxq_log: Log-base-2 of max_rxq_size - * @rx_page_order: Rx buffer page order - * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR - * @max_stations: - * @ht40_channel: is 40MHz width possible in band 2.4 - * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) - * @sw_crypto: 0 for hw, 1 for sw - * @max_xxx_size: for ucode uses - * @ct_kill_threshold: temperature threshold - * @beacon_time_tsf_bits: number of valid tsf bits for beacon time - * @struct iwl_sensitivity_ranges: range of sensitivity values - */ -struct iwl_hw_params { - u8 max_txq_num; - u8 dma_chnl_num; - u16 scd_bc_tbls_size; - u32 tfd_size; - u8 tx_chains_num; - u8 rx_chains_num; - u8 valid_tx_ant; - u8 valid_rx_ant; - u16 max_rxq_size; - u16 max_rxq_log; - u32 rx_page_order; - u32 rx_wrt_ptr_reg; - u8 max_stations; - u8 ht40_channel; - u8 max_beacon_itrvl; /* in 1024 ms */ - u32 max_inst_size; - u32 max_data_size; - u32 max_bsm_size; - u32 ct_kill_threshold; /* value in hw-dependent units */ - u16 beacon_time_tsf_bits; - const struct iwl_sensitivity_ranges *sens; -}; - - -/****************************************************************************** - * - * Functions implemented in core module which are forward declared here - * for use by iwl-[4-5].c - * - * NOTE: The implementation of these functions are not hardware specific - * which is why they are in the core module files. 
- * - * Naming convention -- - * iwl_ <-- Is part of iwlwifi - * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) - * iwl4965_bg_ <-- Called from work queue context - * iwl4965_mac_ <-- mac80211 callback - * - ****************************************************************************/ -extern void iwl4965_update_chain_flags(struct iwl_priv *priv); -extern const u8 iwlegacy_bcast_addr[ETH_ALEN]; -extern int iwl_legacy_queue_space(const struct iwl_queue *q); -static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i) -{ - return q->write_ptr >= q->read_ptr ? - (i >= q->read_ptr && i < q->write_ptr) : - !(i < q->read_ptr && i >= q->write_ptr); -} - - -static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index, - int is_huge) -{ - /* - * This is for init calibration result and scan command which - * required buffer > TFD_MAX_PAYLOAD_SIZE, - * the big buffer at end of command array - */ - if (is_huge) - return q->n_window; /* must be power of 2 */ - - /* Otherwise, use normal size buffers */ - return index & (q->n_window - 1); -} - - -struct iwl_dma_ptr { - dma_addr_t dma; - void *addr; - size_t size; -}; - -#define IWL_OPERATION_MODE_AUTO 0 -#define IWL_OPERATION_MODE_HT_ONLY 1 -#define IWL_OPERATION_MODE_MIXED 2 -#define IWL_OPERATION_MODE_20MHZ 3 - -#define IWL_TX_CRC_SIZE 4 -#define IWL_TX_DELIMITER_SIZE 4 - -#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 - -/* Sensitivity and chain noise calibration */ -#define INITIALIZATION_VALUE 0xFFFF -#define IWL4965_CAL_NUM_BEACONS 20 -#define IWL_CAL_NUM_BEACONS 16 -#define MAXIMUM_ALLOWED_PATHLOSS 15 - -#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 - -#define MAX_FA_OFDM 50 -#define MIN_FA_OFDM 5 -#define MAX_FA_CCK 50 -#define MIN_FA_CCK 5 - -#define AUTO_CORR_STEP_OFDM 1 - -#define AUTO_CORR_STEP_CCK 3 -#define AUTO_CORR_MAX_TH_CCK 160 - -#define NRG_DIFF 2 -#define NRG_STEP_CCK 2 -#define NRG_MARGIN 8 -#define MAX_NUMBER_CCK_NO_FA 100 - -#define AUTO_CORR_CCK_MIN_VAL_DEF (125) - -#define CHAIN_A 0 -#define CHAIN_B 1 -#define CHAIN_C 2 -#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 -#define ALL_BAND_FILTER 0xFF00 -#define IN_BAND_FILTER 0xFF -#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF - -#define NRG_NUM_PREV_STAT_L 20 -#define NUM_RX_CHAINS 3 - -enum iwl4965_false_alarm_state { - IWL_FA_TOO_MANY = 0, - IWL_FA_TOO_FEW = 1, - IWL_FA_GOOD_RANGE = 2, -}; - -enum iwl4965_chain_noise_state { - IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ - IWL_CHAIN_NOISE_ACCUMULATE, - IWL_CHAIN_NOISE_CALIBRATED, - IWL_CHAIN_NOISE_DONE, -}; - -enum iwl4965_calib_enabled_state { - IWL_CALIB_DISABLED = 0, /* must be 0 */ - IWL_CALIB_ENABLED = 1, -}; - -/* - * enum iwl_calib - * defines the order in which results of initial calibrations - * should be sent to the runtime uCode - */ -enum iwl_calib { - IWL_CALIB_MAX, -}; - -/* Opaque calibration results */ -struct iwl_calib_result { - void *buf; - size_t buf_len; -}; - -enum ucode_type { - UCODE_NONE = 0, - UCODE_INIT, - UCODE_RT -}; - -/* Sensitivity calib data */ -struct iwl_sensitivity_data { - u32 auto_corr_ofdm; - u32 auto_corr_ofdm_mrc; - u32 auto_corr_ofdm_x1; - u32 auto_corr_ofdm_mrc_x1; - u32 auto_corr_cck; - u32 auto_corr_cck_mrc; - - u32 last_bad_plcp_cnt_ofdm; - u32 last_fa_cnt_ofdm; - u32 last_bad_plcp_cnt_cck; - u32 last_fa_cnt_cck; - - u32 nrg_curr_state; - u32 nrg_prev_state; - u32 nrg_value[10]; - u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; - u32 nrg_silence_ref; - u32 nrg_energy_idx; - u32 nrg_silence_idx; - u32 nrg_th_cck; - s32 nrg_auto_corr_silence_diff; - u32 
num_in_cck_no_fa; - u32 nrg_th_ofdm; - - u16 barker_corr_th_min; - u16 barker_corr_th_min_mrc; - u16 nrg_th_cca; -}; - -/* Chain noise (differential Rx gain) calib data */ -struct iwl_chain_noise_data { - u32 active_chains; - u32 chain_noise_a; - u32 chain_noise_b; - u32 chain_noise_c; - u32 chain_signal_a; - u32 chain_signal_b; - u32 chain_signal_c; - u16 beacon_count; - u8 disconn_array[NUM_RX_CHAINS]; - u8 delta_gain_code[NUM_RX_CHAINS]; - u8 radio_write; - u8 state; -}; - -#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ -#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - -#define IWL_TRAFFIC_ENTRIES (256) -#define IWL_TRAFFIC_ENTRY_SIZE (64) - -enum { - MEASUREMENT_READY = (1 << 0), - MEASUREMENT_ACTIVE = (1 << 1), -}; - -/* interrupt statistics */ -struct isr_statistics { - u32 hw; - u32 sw; - u32 err_code; - u32 sch; - u32 alive; - u32 rfkill; - u32 ctkill; - u32 wakeup; - u32 rx; - u32 rx_handlers[REPLY_MAX]; - u32 tx; - u32 unhandled; -}; - -/* management statistics */ -enum iwl_mgmt_stats { - MANAGEMENT_ASSOC_REQ = 0, - MANAGEMENT_ASSOC_RESP, - MANAGEMENT_REASSOC_REQ, - MANAGEMENT_REASSOC_RESP, - MANAGEMENT_PROBE_REQ, - MANAGEMENT_PROBE_RESP, - MANAGEMENT_BEACON, - MANAGEMENT_ATIM, - MANAGEMENT_DISASSOC, - MANAGEMENT_AUTH, - MANAGEMENT_DEAUTH, - MANAGEMENT_ACTION, - MANAGEMENT_MAX, -}; -/* control statistics */ -enum iwl_ctrl_stats { - CONTROL_BACK_REQ = 0, - CONTROL_BACK, - CONTROL_PSPOLL, - CONTROL_RTS, - CONTROL_CTS, - CONTROL_ACK, - CONTROL_CFEND, - CONTROL_CFENDACK, - CONTROL_MAX, -}; - -struct traffic_stats { -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - u32 mgmt[MANAGEMENT_MAX]; - u32 ctrl[CONTROL_MAX]; - u32 data_cnt; - u64 data_bytes; -#endif -}; - -/* - * host interrupt timeout value - * used with setting interrupt coalescing timer - * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit - * - * default interrupt coalescing timer is 64 x 32 = 2048 usecs - * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs - */ -#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) -#define IWL_HOST_INT_TIMEOUT_DEF (0x40) -#define IWL_HOST_INT_TIMEOUT_MIN (0x0) -#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) -#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) -#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) - -#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) - -/* TX queue watchdog timeouts in mSecs */ -#define IWL_DEF_WD_TIMEOUT (2000) -#define IWL_LONG_WD_TIMEOUT (10000) -#define IWL_MAX_WD_TIMEOUT (120000) - -struct iwl_force_reset { - int reset_request_count; - int reset_success_count; - int reset_reject_count; - unsigned long reset_duration; - unsigned long last_force_reset_jiffies; -}; - -/* extend beacon time format bit shifting */ -/* - * for _3945 devices - * bits 31:24 - extended - * bits 23:0 - interval - */ -#define IWL3945_EXT_BEACON_TIME_POS 24 -/* - * for _4965 devices - * bits 31:22 - extended - * bits 21:0 - interval - */ -#define IWL4965_EXT_BEACON_TIME_POS 22 - -enum iwl_rxon_context_id { - IWL_RXON_CTX_BSS, - - NUM_IWL_RXON_CTX -}; - -struct iwl_rxon_context { - struct ieee80211_vif *vif; - - const u8 *ac_to_fifo; - const u8 *ac_to_queue; - u8 mcast_queue; - - /* - * We could use the vif to indicate active, but we - * also need it to be active during disabling when - * we already removed the vif for type setting. 
- */ - bool always_active, is_active; - - bool ht_need_multiple_chains; - - enum iwl_rxon_context_id ctxid; - - u32 interface_modes, exclusive_interface_modes; - u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; - - /* - * We declare this const so it can only be - * changed via explicit cast within the - * routines that actually update the physical - * hardware. - */ - const struct iwl_legacy_rxon_cmd active; - struct iwl_legacy_rxon_cmd staging; - - struct iwl_rxon_time_cmd timing; - - struct iwl_qos_info qos_data; - - u8 bcast_sta_id, ap_sta_id; - - u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; - u8 qos_cmd; - u8 wep_key_cmd; - - struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; - u8 key_mapping_keys; - - __le32 station_flags; - - struct { - bool non_gf_sta_present; - u8 protection; - bool enabled, is_40mhz; - u8 extension_chan_offset; - } ht; -}; - -struct iwl_priv { - - /* ieee device used by generic ieee processing code */ - struct ieee80211_hw *hw; - struct ieee80211_channel *ieee_channels; - struct ieee80211_rate *ieee_rates; - struct iwl_cfg *cfg; - - /* temporary frame storage list */ - struct list_head free_frames; - int frames_count; - - enum ieee80211_band band; - int alloc_rxb_page; - - void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; - - /* spectrum measurement report caching */ - struct iwl_spectrum_notification measure_report; - u8 measurement_status; - - /* ucode beacon time */ - u32 ucode_beacon_time; - int missed_beacon_threshold; - - /* track IBSS manager (last beacon) status */ - u32 ibss_manager; - - /* force reset */ - struct iwl_force_reset force_reset; - - /* we allocate array of iwl_channel_info for NIC's valid channels. 
- * Access via channel # using indirect index array */ - struct iwl_channel_info *channel_info; /* channel info array */ - u8 channel_count; /* # of channels */ - - /* thermal calibration */ - s32 temperature; /* degrees Kelvin */ - s32 last_temperature; - - /* init calibration results */ - struct iwl_calib_result calib_results[IWL_CALIB_MAX]; - - /* Scan related variables */ - unsigned long scan_start; - unsigned long scan_start_tsf; - void *scan_cmd; - enum ieee80211_band scan_band; - struct cfg80211_scan_request *scan_request; - struct ieee80211_vif *scan_vif; - u8 scan_tx_ant[IEEE80211_NUM_BANDS]; - u8 mgmt_tx_ant; - - /* spinlock */ - spinlock_t lock; /* protect general shared data */ - spinlock_t hcmd_lock; /* protect hcmd */ - spinlock_t reg_lock; /* protect hw register access */ - struct mutex mutex; - - /* basic pci-network driver stuff */ - struct pci_dev *pci_dev; - - /* pci hardware address support */ - void __iomem *hw_base; - u32 hw_rev; - u32 hw_wa_rev; - u8 rev_id; - - /* microcode/device supports multiple contexts */ - u8 valid_contexts; - - /* command queue number */ - u8 cmd_queue; - - /* max number of station keys */ - u8 sta_key_max_num; - - /* EEPROM MAC addresses */ - struct mac_address addresses[1]; - - /* uCode images, save to reload in case of failure */ - int fw_index; /* firmware we're trying to load */ - u32 ucode_ver; /* version of ucode, copy of - iwl_ucode.ver */ - struct fw_desc ucode_code; /* runtime inst */ - struct fw_desc ucode_data; /* runtime data original */ - struct fw_desc ucode_data_backup; /* runtime data save/restore */ - struct fw_desc ucode_init; /* initialization inst */ - struct fw_desc ucode_init_data; /* initialization data */ - struct fw_desc ucode_boot; /* bootstrap inst */ - enum ucode_type ucode_type; - u8 ucode_write_complete; /* the image write is complete */ - char firmware_name[25]; - - struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; - - __le16 switch_channel; - - /* 1st responses from initialize and runtime uCode images. - * _4965's initialize alive response contains some calibration data. 
*/ - struct iwl_init_alive_resp card_alive_init; - struct iwl_alive_resp card_alive; - - u16 active_rate; - - u8 start_calib; - struct iwl_sensitivity_data sensitivity_data; - struct iwl_chain_noise_data chain_noise_data; - __le16 sensitivity_tbl[HD_TABLE_SIZE]; - - struct iwl_ht_config current_ht_config; - - /* Rate scaling data */ - u8 retry_rate; - - wait_queue_head_t wait_command_queue; - - int activity_timer_active; - - /* Rx and Tx DMA processing queues */ - struct iwl_rx_queue rxq; - struct iwl_tx_queue *txq; - unsigned long txq_ctx_active_msk; - struct iwl_dma_ptr kw; /* keep warm address */ - struct iwl_dma_ptr scd_bc_tbls; - - u32 scd_base_addr; /* scheduler sram base address */ - - unsigned long status; - - /* counts mgmt, ctl, and data packets */ - struct traffic_stats tx_stats; - struct traffic_stats rx_stats; - - /* counts interrupts */ - struct isr_statistics isr_stats; - - struct iwl_power_mgr power_data; - - /* context information */ - u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ - - /* station table variables */ - - /* Note: if lock and sta_lock are needed, lock must be acquired first */ - spinlock_t sta_lock; - int num_stations; - struct iwl_station_entry stations[IWL_STATION_COUNT]; - unsigned long ucode_key_table; - - /* queue refcounts */ -#define IWL_MAX_HW_QUEUES 32 - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - /* for each AC */ - atomic_t queue_stop_count[4]; - - /* Indication if ieee80211_ops->open has been called */ - u8 is_open; - - u8 mac80211_registered; - - /* eeprom -- this is in the card's little endian byte order */ - u8 *eeprom; - struct iwl_eeprom_calib_info *calib_info; - - enum nl80211_iftype iw_mode; - - /* Last Rx'd beacon timestamp */ - u64 timestamp; - - union { -#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) - struct { - void *shared_virt; - dma_addr_t shared_phys; - - struct delayed_work thermal_periodic; - struct delayed_work rfkill_poll; - - struct iwl3945_notif_statistics statistics; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - struct iwl3945_notif_statistics accum_statistics; - struct iwl3945_notif_statistics delta_statistics; - struct iwl3945_notif_statistics max_delta; -#endif - - u32 sta_supp_rates; - int last_rx_rssi; /* From Rx packet statistics */ - - /* Rx'd packet timing information */ - u32 last_beacon_time; - u64 last_tsf; - - /* - * each calibration channel group in the - * EEPROM has a derived clip setting for - * each rate. 
- */ - const struct iwl3945_clip_group clip_groups[5]; - - } _3945; -#endif -#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE) - struct { - struct iwl_rx_phy_res last_phy_res; - bool last_phy_res_valid; - - struct completion firmware_loading_complete; - - /* - * chain noise reset and gain commands are the - * two extra calibration commands follows the standard - * phy calibration commands - */ - u8 phy_calib_chain_noise_reset_cmd; - u8 phy_calib_chain_noise_gain_cmd; - - struct iwl_notif_statistics statistics; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - struct iwl_notif_statistics accum_statistics; - struct iwl_notif_statistics delta_statistics; - struct iwl_notif_statistics max_delta; -#endif - - } _4965; -#endif - }; - - struct iwl_hw_params hw_params; - - u32 inta_mask; - - struct workqueue_struct *workqueue; - - struct work_struct restart; - struct work_struct scan_completed; - struct work_struct rx_replenish; - struct work_struct abort_scan; - - struct iwl_rxon_context *beacon_ctx; - struct sk_buff *beacon_skb; - - struct work_struct tx_flush; - - struct tasklet_struct irq_tasklet; - - struct delayed_work init_alive_start; - struct delayed_work alive_start; - struct delayed_work scan_check; - - /* TX Power */ - s8 tx_power_user_lmt; - s8 tx_power_device_lmt; - s8 tx_power_next; - - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - /* debugging info */ - u32 debug_level; /* per device debugging will override global - iwlegacy_debug_level if set */ -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS - /* debugfs */ - u16 tx_traffic_idx; - u16 rx_traffic_idx; - u8 *tx_traffic; - u8 *rx_traffic; - struct dentry *debugfs_dir; - u32 dbgfs_sram_offset, dbgfs_sram_len; - bool disable_ht40; -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ - - struct work_struct txpower_work; - u32 disable_sens_cal; - u32 disable_chain_noise_cal; - u32 disable_tx_power_cal; - struct work_struct run_time_calib_work; - struct timer_list statistics_periodic; - struct timer_list watchdog; - bool hw_ready; - - struct led_classdev led; - unsigned long blink_on, blink_off; - bool led_registered; -}; /*iwl_priv */ - -static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) -{ - set_bit(txq_id, &priv->txq_ctx_active_msk); -} - -static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) -{ - clear_bit(txq_id, &priv->txq_ctx_active_msk); -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -/* - * iwl_legacy_get_debug_level: Return active debug level for device - * - * Using sysfs it is possible to set per device debug level. This debug - * level will be used if set, otherwise the global debug level which can be - * set via module parameter is used. - */ -static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) -{ - if (priv->debug_level) - return priv->debug_level; - else - return iwlegacy_debug_level; -} -#else -static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) -{ - return iwlegacy_debug_level; -} -#endif - - -static inline struct ieee80211_hdr * -iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv, - int txq_id, int idx) -{ - if (priv->txq[txq_id].txb[idx].skb) - return (struct ieee80211_hdr *)priv->txq[txq_id]. 
- txb[idx].skb->data; - return NULL; -} - -static inline struct iwl_rxon_context * -iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif) -{ - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - return vif_priv->ctx; -} - -#define for_each_context(priv, ctx) \ - for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ - ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ - if (priv->valid_contexts & BIT(ctx->ctxid)) - -static inline int iwl_legacy_is_associated(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid) -{ - return (priv->contexts[ctxid].active.filter_flags & - RXON_FILTER_ASSOC_MSK) ? 1 : 0; -} - -static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv) -{ - return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); -} - -static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx) -{ - return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; -} - -static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info) -{ - if (ch_info == NULL) - return 0; - return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; -} - -static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info) -{ - return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; -} - -static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info) -{ - return ch_info->band == IEEE80211_BAND_5GHZ; -} - -static inline int -iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) -{ - return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; -} - -static inline int -iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) -{ - return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; -} - -static inline void -__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) -{ - __free_pages(page, priv->hw_params.rx_page_order); - priv->alloc_rxb_page--; -} - -static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page) -{ - free_pages(page, priv->hw_params.rx_page_order); - priv->alloc_rxb_page--; -} -#endif /* __iwl_legacy_dev_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c deleted file mode 100644 index acec99197ce..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c +++ /dev/null @@ -1,42 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/module.h> - -/* sparse doesn't like tracepoint macros */ -#ifndef __CHECKER__ -#include "iwl-dev.h" - -#define CREATE_TRACE_POINTS -#include "iwl-devtrace.h" - -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error); -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h deleted file mode 100644 index a443725ba6b..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h +++ /dev/null @@ -1,210 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) -#define __IWLWIFI_LEGACY_DEVICE_TRACE - -#include <linux/tracepoint.h> - -#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__) -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, ...) 
\ -static inline void trace_ ## name(proto) {} -#endif - - -#define PRIV_ENTRY __field(struct iwl_priv *, priv) -#define PRIV_ASSIGN (__entry->priv = priv) - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi_legacy_io - -TRACE_EVENT(iwlwifi_legacy_dev_ioread32, - TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), - TP_ARGS(priv, offs, val), - TP_STRUCT__entry( - PRIV_ENTRY - __field(u32, offs) - __field(u32, val) - ), - TP_fast_assign( - PRIV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%p] read io[%#x] = %#x", __entry->priv, - __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_legacy_dev_iowrite8, - TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), - TP_ARGS(priv, offs, val), - TP_STRUCT__entry( - PRIV_ENTRY - __field(u32, offs) - __field(u8, val) - ), - TP_fast_assign( - PRIV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, - __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_legacy_dev_iowrite32, - TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), - TP_ARGS(priv, offs, val), - TP_STRUCT__entry( - PRIV_ENTRY - __field(u32, offs) - __field(u32, val) - ), - TP_fast_assign( - PRIV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, - __entry->offs, __entry->val) -); - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi_legacy_ucode - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi - -TRACE_EVENT(iwlwifi_legacy_dev_hcmd, - TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), - TP_ARGS(priv, hcmd, len, flags), - TP_STRUCT__entry( - PRIV_ENTRY - __dynamic_array(u8, hcmd, len) - __field(u32, flags) - ), - TP_fast_assign( - PRIV_ASSIGN; - memcpy(__get_dynamic_array(hcmd), hcmd, len); - __entry->flags = flags; - ), - TP_printk("[%p] hcmd %#.2x (%ssync)", - __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], - __entry->flags & CMD_ASYNC ? "a" : "") -); - -TRACE_EVENT(iwlwifi_legacy_dev_rx, - TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), - TP_ARGS(priv, rxbuf, len), - TP_STRUCT__entry( - PRIV_ENTRY - __dynamic_array(u8, rxbuf, len) - ), - TP_fast_assign( - PRIV_ASSIGN; - memcpy(__get_dynamic_array(rxbuf), rxbuf, len); - ), - TP_printk("[%p] RX cmd %#.2x", - __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4]) -); - -TRACE_EVENT(iwlwifi_legacy_dev_tx, - TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, - void *buf0, size_t buf0_len, - void *buf1, size_t buf1_len), - TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), - TP_STRUCT__entry( - PRIV_ENTRY - - __field(size_t, framelen) - __dynamic_array(u8, tfd, tfdlen) - - /* - * Do not insert between or below these items, - * we want to keep the frame together (except - * for the possible padding). 
- */ - __dynamic_array(u8, buf0, buf0_len) - __dynamic_array(u8, buf1, buf1_len) - ), - TP_fast_assign( - PRIV_ASSIGN; - __entry->framelen = buf0_len + buf1_len; - memcpy(__get_dynamic_array(tfd), tfd, tfdlen); - memcpy(__get_dynamic_array(buf0), buf0, buf0_len); - memcpy(__get_dynamic_array(buf1), buf1, buf1_len); - ), - TP_printk("[%p] TX %.2x (%zu bytes)", - __entry->priv, - ((u8 *)__get_dynamic_array(buf0))[0], - __entry->framelen) -); - -TRACE_EVENT(iwlwifi_legacy_dev_ucode_error, - TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, - u32 data1, u32 data2, u32 line, u32 blink1, - u32 blink2, u32 ilink1, u32 ilink2), - TP_ARGS(priv, desc, time, data1, data2, line, - blink1, blink2, ilink1, ilink2), - TP_STRUCT__entry( - PRIV_ENTRY - __field(u32, desc) - __field(u32, time) - __field(u32, data1) - __field(u32, data2) - __field(u32, line) - __field(u32, blink1) - __field(u32, blink2) - __field(u32, ilink1) - __field(u32, ilink2) - ), - TP_fast_assign( - PRIV_ASSIGN; - __entry->desc = desc; - __entry->time = time; - __entry->data1 = data1; - __entry->data2 = data2; - __entry->line = line; - __entry->blink1 = blink1; - __entry->blink2 = blink2; - __entry->ilink1 = ilink1; - __entry->ilink2 = ilink2; - ), - TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " - "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", - __entry->priv, __entry->desc, __entry->time, __entry->data1, - __entry->data2, __entry->line, __entry->blink1, - __entry->blink2, __entry->ilink1, __entry->ilink2) -); - -#endif /* __IWLWIFI_DEVICE_TRACE */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE iwl-devtrace -#include <trace/define_trace.h> diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c deleted file mode 100644 index 5bf3f49b74a..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c +++ /dev/null @@ -1,553 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/init.h> - -#include <net/mac80211.h> - -#include "iwl-commands.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-debug.h" -#include "iwl-eeprom.h" -#include "iwl-io.h" - -/************************** EEPROM BANDS **************************** - * - * The iwlegacy_eeprom_band definitions below provide the mapping from the - * EEPROM contents to the specific channel number supported for each - * band. - * - * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 - * definition below maps to physical channel 42 in the 5.2GHz spectrum. - * The specific geography and calibration information for that channel - * is contained in the eeprom map itself. - * - * During init, we copy the eeprom information and channel map - * information into priv->channel_info_24/52 and priv->channel_map_24/52 - * - * channel_map_24/52 provides the index in the channel_info array for a - * given channel. We have to have two separate maps as there is channel - * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and - * band_2 - * - * A value of 0xff stored in the channel_map indicates that the channel - * is not supported by the hardware at all. - * - * A value of 0xfe in the channel_map indicates that the channel is not - * valid for Tx with the current hardware. This means that - * while the system can tune and receive on a given channel, it may not - * be able to associate or transmit any frames on that - * channel. There is no corresponding channel information for that - * entry. 
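The 0xff/0xfe sentinel convention described above can be shown with a small, self-contained sketch. The table contents and the lookup_channel() helper below are invented for illustration; the driver's real maps live in struct iwl_priv and are filled during init. The band definitions and EEPROM routines follow.

    /* Illustration only: models the channel_map lookup described above.
     * The array contents and names here are made up for the example. */
    #include <stdint.h>
    #include <stdio.h>

    #define CH_UNSUPPORTED 0xff  /* channel not supported by the hardware  */
    #define CH_RX_ONLY     0xfe  /* can tune/receive, but not associate/Tx */

    static const uint8_t channel_map[15] = {
            /* slot 0 unused; channels 1..14 map to channel_info slots 0..13 */
            CH_UNSUPPORTED, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13
    };

    static int lookup_channel(uint8_t chan)
    {
            uint8_t idx;

            if (chan >= sizeof(channel_map))
                    return -1;

            idx = channel_map[chan];
            if (idx == CH_UNSUPPORTED)
                    return -1;              /* not supported by this SKU   */
            if (idx == CH_RX_ONLY)
                    return -1;              /* rx only, no Tx/association  */

            return idx;                     /* index into channel_info     */
    }

    int main(void)
    {
            printf("channel 6 -> info slot %d\n", lookup_channel(6));
            return 0;
    }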
- * - *********************************************************************/ - -/* 2.4 GHz */ -const u8 iwlegacy_eeprom_band_1[14] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 -}; - -/* 5.2 GHz bands */ -static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */ - 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 -}; - -static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */ - 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 -}; - -static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */ - 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 -}; - -static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */ - 145, 149, 153, 157, 161, 165 -}; - -static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */ - 1, 2, 3, 4, 5, 6, 7 -}; - -static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */ - 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 -}; - -/****************************************************************************** - * - * EEPROM related functions - * -******************************************************************************/ - -static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv) -{ - u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - int ret = 0; - - IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); - switch (gp) { - case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: - case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - break; - default: - IWL_ERR(priv, "bad EEPROM signature," - "EEPROM_GP=0x%08x\n", gp); - ret = -ENOENT; - break; - } - return ret; -} - -const u8 -*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) -{ - BUG_ON(offset >= priv->cfg->base_params->eeprom_size); - return &priv->eeprom[offset]; -} -EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr); - -u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset) -{ - if (!priv->eeprom) - return 0; - return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); -} -EXPORT_SYMBOL(iwl_legacy_eeprom_query16); - -/** - * iwl_legacy_eeprom_init - read EEPROM contents - * - * Load the EEPROM contents from adapter into priv->eeprom - * - * NOTE: This routine uses the non-debug IO access functions. 
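Before the init routine itself (which follows), a brief usage sketch: once iwl_legacy_eeprom_init() has cached the image, fixed offsets such as EEPROM_MAC_ADDRESS and EEPROM_VERSION (defined in iwl-eeprom.h, further down in this patch) can be read back with the query helpers. The example_read_ids() function below is hypothetical and only shows the calling convention.

    /* Hypothetical helper, illustration only: pull the MAC address and
     * EEPROM version out of the image cached by iwl_legacy_eeprom_init(). */
    static void example_read_ids(struct iwl_priv *priv, u8 mac[6])
    {
            const u8 *addr;
            u16 version;

            /* 6 MAC bytes start at byte offset 2*0x15 (EEPROM_MAC_ADDRESS) */
            addr = iwl_legacy_eeprom_query_addr(priv, EEPROM_MAC_ADDRESS);
            memcpy(mac, addr, 6);

            /* 16-bit little-endian value assembled by the query16 helper */
            version = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
            IWL_DEBUG_EEPROM(priv, "EEPROM version 0x%04x\n", version);
    }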
- */ -int iwl_legacy_eeprom_init(struct iwl_priv *priv) -{ - __le16 *e; - u32 gp = iwl_read32(priv, CSR_EEPROM_GP); - int sz; - int ret; - u16 addr; - - /* allocate eeprom */ - sz = priv->cfg->base_params->eeprom_size; - IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); - priv->eeprom = kzalloc(sz, GFP_KERNEL); - if (!priv->eeprom) { - ret = -ENOMEM; - goto alloc_err; - } - e = (__le16 *)priv->eeprom; - - priv->cfg->ops->lib->apm_ops.init(priv); - - ret = iwl_legacy_eeprom_verify_signature(priv); - if (ret < 0) { - IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); - ret = -ENOENT; - goto err; - } - - /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); - if (ret < 0) { - IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); - ret = -ENOENT; - goto err; - } - - /* eeprom is an array of 16bit values */ - for (addr = 0; addr < sz; addr += sizeof(u16)) { - u32 r; - - _iwl_legacy_write32(priv, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - - ret = iwl_poll_bit(priv, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if (ret < 0) { - IWL_ERR(priv, "Time out reading EEPROM[%d]\n", - addr); - goto done; - } - r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG); - e[addr / 2] = cpu_to_le16(r >> 16); - } - - IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", - "EEPROM", - iwl_legacy_eeprom_query16(priv, EEPROM_VERSION)); - - ret = 0; -done: - priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); - -err: - if (ret) - iwl_legacy_eeprom_free(priv); - /* Reset chip to save power until we load uCode during "up". */ - iwl_legacy_apm_stop(priv); -alloc_err: - return ret; -} -EXPORT_SYMBOL(iwl_legacy_eeprom_init); - -void iwl_legacy_eeprom_free(struct iwl_priv *priv) -{ - kfree(priv->eeprom); - priv->eeprom = NULL; -} -EXPORT_SYMBOL(iwl_legacy_eeprom_free); - -static void iwl_legacy_init_band_reference(const struct iwl_priv *priv, - int eep_band, int *eeprom_ch_count, - const struct iwl_eeprom_channel **eeprom_ch_info, - const u8 **eeprom_ch_index) -{ - u32 offset = priv->cfg->ops->lib-> - eeprom_ops.regulatory_bands[eep_band - 1]; - switch (eep_band) { - case 1: /* 2.4GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1); - *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_1; - break; - case 2: /* 4.9GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2); - *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_2; - break; - case 3: /* 5.2GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3); - *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_3; - break; - case 4: /* 5.5GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4); - *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_4; - break; - case 5: /* 5.7GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5); - *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_5; - break; - case 6: /* 2.4GHz ht40 channels */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6); - *eeprom_ch_info = (struct 
iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_6; - break; - case 7: /* 5 GHz ht40 channels */ - *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7); - *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_legacy_eeprom_query_addr(priv, offset); - *eeprom_ch_index = iwlegacy_eeprom_band_7; - break; - default: - BUG(); - } -} - -#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ - ? # x " " : "") -/** - * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv. - * - * Does not set up a command, or touch hardware. - */ -static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv, - enum ieee80211_band band, u16 channel, - const struct iwl_eeprom_channel *eeprom_ch, - u8 clear_ht40_extension_channel) -{ - struct iwl_channel_info *ch_info; - - ch_info = (struct iwl_channel_info *) - iwl_legacy_get_channel_info(priv, band, channel); - - if (!iwl_legacy_is_channel_valid(ch_info)) - return -1; - - IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" - " Ad-Hoc %ssupported\n", - ch_info->channel, - iwl_legacy_is_channel_a_band(ch_info) ? - "5.2" : "2.4", - CHECK_AND_PRINT(IBSS), - CHECK_AND_PRINT(ACTIVE), - CHECK_AND_PRINT(RADAR), - CHECK_AND_PRINT(WIDE), - CHECK_AND_PRINT(DFS), - eeprom_ch->flags, - eeprom_ch->max_power_avg, - ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) - && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? - "" : "not "); - - ch_info->ht40_eeprom = *eeprom_ch; - ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; - ch_info->ht40_flags = eeprom_ch->flags; - if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) - ch_info->ht40_extension_channel &= - ~clear_ht40_extension_channel; - - return 0; -} - -#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ - ? # x " " : "") - -/** - * iwl_legacy_init_channel_map - Set up driver's info for all possible channels - */ -int iwl_legacy_init_channel_map(struct iwl_priv *priv) -{ - int eeprom_ch_count = 0; - const u8 *eeprom_ch_index = NULL; - const struct iwl_eeprom_channel *eeprom_ch_info = NULL; - int band, ch; - struct iwl_channel_info *ch_info; - - if (priv->channel_count) { - IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n"); - return 0; - } - - IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n"); - - priv->channel_count = - ARRAY_SIZE(iwlegacy_eeprom_band_1) + - ARRAY_SIZE(iwlegacy_eeprom_band_2) + - ARRAY_SIZE(iwlegacy_eeprom_band_3) + - ARRAY_SIZE(iwlegacy_eeprom_band_4) + - ARRAY_SIZE(iwlegacy_eeprom_band_5); - - IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n", - priv->channel_count); - - priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * - priv->channel_count, GFP_KERNEL); - if (!priv->channel_info) { - IWL_ERR(priv, "Could not allocate channel_info\n"); - priv->channel_count = 0; - return -ENOMEM; - } - - ch_info = priv->channel_info; - - /* Loop through the 5 EEPROM bands adding them in order to the - * channel map we maintain (that contains additional information than - * what just in the EEPROM) */ - for (band = 1; band <= 5; band++) { - - iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, - &eeprom_ch_info, &eeprom_ch_index); - - /* Loop through each band adding each of the channels */ - for (ch = 0; ch < eeprom_ch_count; ch++) { - ch_info->channel = eeprom_ch_index[ch]; - ch_info->band = (band == 1) ? 
IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; - - /* permanently store EEPROM's channel regulatory flags - * and max power in channel info database. */ - ch_info->eeprom = eeprom_ch_info[ch]; - - /* Copy the run-time flags so they are there even on - * invalid channels */ - ch_info->flags = eeprom_ch_info[ch].flags; - /* First write that ht40 is not enabled, and then enable - * one by one */ - ch_info->ht40_extension_channel = - IEEE80211_CHAN_NO_HT40; - - if (!(iwl_legacy_is_channel_valid(ch_info))) { - IWL_DEBUG_EEPROM(priv, - "Ch. %d Flags %x [%sGHz] - " - "No traffic\n", - ch_info->channel, - ch_info->flags, - iwl_legacy_is_channel_a_band(ch_info) ? - "5.2" : "2.4"); - ch_info++; - continue; - } - - /* Initialize regulatory-based run-time data */ - ch_info->max_power_avg = ch_info->curr_txpow = - eeprom_ch_info[ch].max_power_avg; - ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; - ch_info->min_power = 0; - - IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] " - "%s%s%s%s%s%s(0x%02x %ddBm):" - " Ad-Hoc %ssupported\n", - ch_info->channel, - iwl_legacy_is_channel_a_band(ch_info) ? - "5.2" : "2.4", - CHECK_AND_PRINT_I(VALID), - CHECK_AND_PRINT_I(IBSS), - CHECK_AND_PRINT_I(ACTIVE), - CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), - CHECK_AND_PRINT_I(DFS), - eeprom_ch_info[ch].flags, - eeprom_ch_info[ch].max_power_avg, - ((eeprom_ch_info[ch]. - flags & EEPROM_CHANNEL_IBSS) - && !(eeprom_ch_info[ch]. - flags & EEPROM_CHANNEL_RADAR)) - ? "" : "not "); - - ch_info++; - } - } - - /* Check if we do have HT40 channels */ - if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == - EEPROM_REGULATORY_BAND_NO_HT40 && - priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == - EEPROM_REGULATORY_BAND_NO_HT40) - return 0; - - /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ - for (band = 6; band <= 7; band++) { - enum ieee80211_band ieeeband; - - iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, - &eeprom_ch_info, &eeprom_ch_index); - - /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ - ieeeband = - (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - - /* Loop through each band adding each of the channels */ - for (ch = 0; ch < eeprom_ch_count; ch++) { - /* Set up driver's info for lower half */ - iwl_legacy_mod_ht40_chan_info(priv, ieeeband, - eeprom_ch_index[ch], - &eeprom_ch_info[ch], - IEEE80211_CHAN_NO_HT40PLUS); - - /* Set up driver's info for upper half */ - iwl_legacy_mod_ht40_chan_info(priv, ieeeband, - eeprom_ch_index[ch] + 4, - &eeprom_ch_info[ch], - IEEE80211_CHAN_NO_HT40MINUS); - } - } - - return 0; -} -EXPORT_SYMBOL(iwl_legacy_init_channel_map); - -/* - * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map - */ -void iwl_legacy_free_channel_map(struct iwl_priv *priv) -{ - kfree(priv->channel_info); - priv->channel_count = 0; -} -EXPORT_SYMBOL(iwl_legacy_free_channel_map); - -/** - * iwl_legacy_get_channel_info - Find driver's private channel info - * - * Based on band and channel number. 
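A usage sketch before the definition itself (which follows): iwl_legacy_get_channel_info() pairs naturally with the channel predicates from iwl-dev.h shown earlier. The caller below is hypothetical; the s8 return type mirrors the max_power_avg/scan_power fields filled in by iwl_legacy_init_channel_map() above.

    /* Hypothetical caller, illustration only: fetch the regulatory scan
     * power recorded for a 5 GHz channel during channel-map init. */
    static s8 example_scan_power(struct iwl_priv *priv, u16 channel)
    {
            const struct iwl_channel_info *ch_info;

            ch_info = iwl_legacy_get_channel_info(priv, IEEE80211_BAND_5GHZ,
                                                  channel);
            if (!iwl_legacy_is_channel_valid(ch_info))
                    return 0;       /* unknown channel: nothing to report */

            /* scan_power was copied from the EEPROM's max_power_avg above */
            return ch_info->scan_power;
    }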
- */ -const struct -iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv, - enum ieee80211_band band, u16 channel) -{ - int i; - - switch (band) { - case IEEE80211_BAND_5GHZ: - for (i = 14; i < priv->channel_count; i++) { - if (priv->channel_info[i].channel == channel) - return &priv->channel_info[i]; - } - break; - case IEEE80211_BAND_2GHZ: - if (channel >= 1 && channel <= 14) - return &priv->channel_info[channel - 1]; - break; - default: - BUG(); - } - - return NULL; -} -EXPORT_SYMBOL(iwl_legacy_get_channel_info); diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h deleted file mode 100644 index c59c8100202..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h +++ /dev/null @@ -1,344 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_legacy_eeprom_h__ -#define __iwl_legacy_eeprom_h__ - -#include <net/mac80211.h> - -struct iwl_priv; - -/* - * EEPROM access time values: - * - * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. - * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). - * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. - * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. - */ -#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ - -#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ -#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - - -/* - * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags. - * - * IBSS and/or AP operation is allowed *only* on those channels with - * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because - * RADAR detection is not supported by the 4965 driver, but is a - * requirement for establishing a new network for legal operation on channels - * requiring RADAR detection or restricting ACTIVE scanning. - * - * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. - * It only indicates that 20 MHz channel use is supported; HT40 channel - * usage is indicated by a separate set of regulatory flags for each - * HT40 channel pair. - * - * NOTE: Using a channel inappropriately will result in a uCode error! - */ -#define IWL_NUM_TX_CALIB_GROUPS 5 -enum { - EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ - EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ - /* Bit 2 Reserved */ - EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ - EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ - EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ - /* Bit 6 Reserved (was Narrow Channel) */ - EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ -}; - -/* SKU Capabilities */ -/* 3945 only */ -#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) -#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) - -/* *regulatory* channel data format in eeprom, one for each channel. - * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. 
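The IBSS/AP admission rule documented above (VALID && IBSS && ACTIVE && !RADAR) reduces to one mask test on the per-channel flags byte. The helper below is a self-contained restatement of that rule, with the flag values copied from the enum above; the function name is invented for the example. The per-channel struct definition follows.

    /* Self-contained restatement of the rule above:
     * IBSS/AP operation needs VALID && IBSS && ACTIVE && !RADAR. */
    #include <stdbool.h>
    #include <stdint.h>

    #define EEPROM_CHANNEL_VALID   (1 << 0)
    #define EEPROM_CHANNEL_IBSS    (1 << 1)
    #define EEPROM_CHANNEL_ACTIVE  (1 << 3)
    #define EEPROM_CHANNEL_RADAR   (1 << 4)

    static bool channel_allows_ibss_or_ap(uint8_t flags)
    {
            const uint8_t need = EEPROM_CHANNEL_VALID | EEPROM_CHANNEL_IBSS |
                                 EEPROM_CHANNEL_ACTIVE;

            return (flags & need) == need && !(flags & EEPROM_CHANNEL_RADAR);
    }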
*/ -struct iwl_eeprom_channel { - u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ - s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ -} __packed; - -/* 3945 Specific */ -#define EEPROM_3945_EEPROM_VERSION (0x2f) - -/* 4965 has two radio transmitters (and 3 radio receivers) */ -#define EEPROM_TX_POWER_TX_CHAINS (2) - -/* 4965 has room for up to 8 sets of txpower calibration data */ -#define EEPROM_TX_POWER_BANDS (8) - -/* 4965 factory calibration measures txpower gain settings for - * each of 3 target output levels */ -#define EEPROM_TX_POWER_MEASUREMENTS (3) - -/* 4965 Specific */ -/* 4965 driver does not work with txpower calibration version < 5 */ -#define EEPROM_4965_TX_POWER_VERSION (5) -#define EEPROM_4965_EEPROM_VERSION (0x2f) -#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ -#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ -#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ -#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ - -/* 2.4 GHz */ -extern const u8 iwlegacy_eeprom_band_1[14]; - -/* - * factory calibration data for one txpower level, on one channel, - * measured on one of the 2 tx chains (radio transmitter and associated - * antenna). EEPROM contains: - * - * 1) Temperature (degrees Celsius) of device when measurement was made. - * - * 2) Gain table index used to achieve the target measurement power. - * This refers to the "well-known" gain tables (see iwl-4965-hw.h). - * - * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). - * - * 4) RF power amplifier detector level measurement (not used). - */ -struct iwl_eeprom_calib_measure { - u8 temperature; /* Device temperature (Celsius) */ - u8 gain_idx; /* Index into gain table */ - u8 actual_pow; /* Measured RF output power, half-dBm */ - s8 pa_det; /* Power amp detector level (not used) */ -} __packed; - - -/* - * measurement set for one channel. EEPROM contains: - * - * 1) Channel number measured - * - * 2) Measurements for each of 3 power levels for each of 2 radio transmitters - * (a.k.a. "tx chains") (6 measurements altogether) - */ -struct iwl_eeprom_calib_ch_info { - u8 ch_num; - struct iwl_eeprom_calib_measure - measurements[EEPROM_TX_POWER_TX_CHAINS] - [EEPROM_TX_POWER_MEASUREMENTS]; -} __packed; - -/* - * txpower subband info. - * - * For each frequency subband, EEPROM contains the following: - * - * 1) First and last channels within range of the subband. "0" values - * indicate that this sample set is not being used. - * - * 2) Sample measurement sets for 2 channels close to the range endpoints. - */ -struct iwl_eeprom_calib_subband_info { - u8 ch_from; /* channel number of lowest channel in subband */ - u8 ch_to; /* channel number of highest channel in subband */ - struct iwl_eeprom_calib_ch_info ch1; - struct iwl_eeprom_calib_ch_info ch2; -} __packed; - - -/* - * txpower calibration info. EEPROM contains: - * - * 1) Factory-measured saturation power levels (maximum levels at which - * tx power amplifier can output a signal without too much distortion). - * There is one level for 2.4 GHz band and one for 5 GHz band. These - * values apply to all channels within each of the bands. - * - * 2) Factory-measured power supply voltage level. This is assumed to be - * constant (i.e. same value applies to all channels/bands) while the - * factory measurements are being made. - * - * 3) Up to 8 sets of factory-measured txpower calibration values. 
- * These are for different frequency ranges, since txpower gain - * characteristics of the analog radio circuitry vary with frequency. - * - * Not all sets need to be filled with data; - * struct iwl_eeprom_calib_subband_info contains range of channels - * (0 if unused) for each set of data. - */ -struct iwl_eeprom_calib_info { - u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ - u8 saturation_power52; /* half-dBm */ - __le16 voltage; /* signed */ - struct iwl_eeprom_calib_subband_info - band_info[EEPROM_TX_POWER_BANDS]; -} __packed; - - -/* General */ -#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ -#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ -#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ -#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ -#define EEPROM_VERSION (2*0x44) /* 2 bytes */ -#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ -#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ -#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ -#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ -#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ - -/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ -#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ -#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ -#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ -#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ -#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ -#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ - -#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 -#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 - -/* - * Per-channel regulatory data. - * - * Each channel that *might* be supported by iwl has a fixed location - * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory - * txpower (MSB). - * - * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) - * channels (only for 4965, not supported by 3945) appear later in the EEPROM. - * - * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 - */ -#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ -#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ - -/* - * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, - * 5.0 GHz channels 7, 8, 11, 12, 16 - * (4915-5080MHz) (none of these is ever supported) - */ -#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ - -/* - * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 - * (5170-5320MHz) - */ -#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ - -/* - * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 - * (5500-5700MHz) - */ -#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ - -/* - * 5.7 GHz channels 145, 149, 153, 157, 161, 165 - * (5725-5825MHz) - */ -#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ - -/* - * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) - * - * The channel listed is the center of the lower 20 MHz half of the channel. 
- * The overall center frequency is actually 2 channels (10 MHz) above that, - * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away - * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, - * and the overall HT40 channel width centers on channel 3. - * - * NOTE: The RXON command uses 20 MHz channel numbers to specify the - * control channel to which to tune. RXON also specifies whether the - * control channel is the upper or lower half of a HT40 channel. - * - * NOTE: 4965 does not support HT40 channels on 2.4 GHz. - */ -#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ - -/* - * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), - * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) - */ -#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ - -#define EEPROM_REGULATORY_BAND_NO_HT40 (0) - -struct iwl_eeprom_ops { - const u32 regulatory_bands[7]; - int (*acquire_semaphore) (struct iwl_priv *priv); - void (*release_semaphore) (struct iwl_priv *priv); -}; - - -int iwl_legacy_eeprom_init(struct iwl_priv *priv); -void iwl_legacy_eeprom_free(struct iwl_priv *priv); -const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, - size_t offset); -u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset); -int iwl_legacy_init_channel_map(struct iwl_priv *priv); -void iwl_legacy_free_channel_map(struct iwl_priv *priv); -const struct iwl_channel_info *iwl_legacy_get_channel_info( - const struct iwl_priv *priv, - enum ieee80211_band band, u16 channel); - -#endif /* __iwl_legacy_eeprom_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h deleted file mode 100644 index 6e6091816e3..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-fh.h +++ /dev/null @@ -1,513 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_legacy_fh_h__ -#define __iwl_legacy_fh_h__ - -/****************************/ -/* Flow Handler Definitions */ -/****************************/ - -/** - * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) - * Addresses are offsets from device's PCI hardware base address. - */ -#define FH_MEM_LOWER_BOUND (0x1000) -#define FH_MEM_UPPER_BOUND (0x2000) - -/** - * Keep-Warm (KW) buffer base address. - * - * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the - * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency - * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host - * from going into a power-savings mode that would cause higher DRAM latency, - * and possible data over/under-runs, before all Tx/Rx is complete. - * - * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) - * of the buffer, which must be 4K aligned. Once this is set up, the 4965 - * automatically invokes keep-warm accesses when normal accesses might not - * be sufficient to maintain fast DRAM response. - * - * Bit fields: - * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned - */ -#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) - - -/** - * TFD Circular Buffers Base (CBBC) addresses - * - * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident - * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) - * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 - * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte - * aligned (address bits 0-7 must be 0). 
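A register-programming sketch, derived from this description rather than taken from the patch: each queue's CBBC register (FH_MEM_CBBC_QUEUE(), defined just below) takes physical address bits [35:8], i.e. the 256-byte-aligned DMA base shifted right by 8. The direct-write accessor name and the dma_addr parameter are assumed; the bit-field breakdown continues below.

    /* Sketch only (iwl_legacy_write_direct32() is an assumed accessor name):
     * point Tx queue txq_id's TFD circular buffer at a 256-byte-aligned
     * DMA address by writing bits [35:8] of that address. */
    static void example_set_tfd_cb_base(struct iwl_priv *priv, int txq_id,
                                        dma_addr_t dma_addr)
    {
            /* 256-byte alignment means the low 8 bits are zero and dropped */
            iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                                      dma_addr >> 8);
    }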
- * - * Bit fields in each pointer register: - * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned - */ -#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) -#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) - -/* Find TFD CB base pointer for given queue (range 0-15). */ -#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) - - -/** - * Rx SRAM Control and Status Registers (RSCSR) - * - * These registers provide handshake between driver and 4965 for the Rx queue - * (this queue handles *all* command responses, notifications, Rx data, etc. - * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx - * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can - * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer - * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 - * mapping between RBDs and RBs. - * - * Driver must allocate host DRAM memory for the following, and set the - * physical address of each into 4965 registers: - * - * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 - * entries (although any power of 2, up to 4096, is selectable by driver). - * Each entry (1 dword) points to a receive buffer (RB) of consistent size - * (typically 4K, although 8K or 16K are also selectable by driver). - * Driver sets up RB size and number of RBDs in the CB via Rx config - * register FH_MEM_RCSR_CHNL0_CONFIG_REG. - * - * Bit fields within one RBD: - * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned - * - * Driver sets physical address [35:8] of base of RBD circular buffer - * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. - * - * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers - * (RBs) have been filled, via a "write pointer", actually the index of - * the RB's corresponding RBD within the circular buffer. Driver sets - * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. - * - * Bit fields in lower dword of Rx status buffer (upper dword not used - * by driver; see struct iwl4965_shared, val0): - * 31-12: Not used by driver - * 11- 0: Index of last filled Rx buffer descriptor - * (4965 writes, driver reads this value) - * - * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must - * enter pointers to these RBs into contiguous RBD circular buffer entries, - * and update the 4965's "write" index register, - * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. - * - * This "write" index corresponds to the *next* RBD that the driver will make - * available, i.e. one RBD past the tail of the ready-to-fill RBDs within - * the circular buffer. This value should initially be 0 (before preparing any - * RBs), should be 8 after preparing the first 8 RBs (for example), and must - * wrap back to 0 at the end of the circular buffer (but don't wrap before - * "read" index has advanced past 1! See below). - * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. - * - * As the 4965 fills RBs (referenced from contiguous RBDs within the circular - * buffer), it updates the Rx status buffer in host DRAM, 2) described above, - * to tell the driver the index of the latest filled RBD. The driver must - * read this "read" index from DRAM after receiving an Rx interrupt from 4965. - * - * The driver must also internally keep track of a third index, which is the - * next RBD to process. 
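Before the interrupt-handling description continues below, a sketch of the write-index rule just stated: the hardware expects the write pointer to advance in multiples of 8, so the driver rounds down before writing FH_RSCSR_CHNL0_RBDCB_WPTR_REG (defined just below). The iwl_rx_queue type, its write field, and the write accessor are assumed names.

    /* Sketch only (struct iwl_rx_queue, rxq->write and the write accessor
     * are assumed): publish the driver's Rx write index, rounded down to a
     * multiple of 8 as the hardware requires. */
    static void example_rx_update_write_ptr(struct iwl_priv *priv,
                                            struct iwl_rx_queue *rxq)
    {
            u32 w = rxq->write & ~0x7;      /* multiple-of-8 rule */

            iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, w);
    }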
When receiving an Rx interrupt, driver should process - * all filled but unprocessed RBs up to, but not including, the RB - * corresponding to the "read" index. For example, if "read" index becomes "1", - * driver may process the RB pointed to by RBD 0. Depending on volume of - * traffic, there may be many RBs to process. - * - * If read index == write index, 4965 thinks there is no room to put new data. - * Due to this, the maximum number of filled RBs is 255, instead of 256. To - * be safe, make sure that there is a gap of at least 2 RBDs between "write" - * and "read" indexes; that is, make sure that there are no more than 254 - * buffers waiting to be filled. - */ -#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) -#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) -#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) - -/** - * Physical base address of 8-byte Rx Status buffer. - * Bit fields: - * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. - */ -#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) - -/** - * Physical base address of Rx Buffer Descriptor Circular Buffer. - * Bit fields: - * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. - */ -#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) - -/** - * Rx write pointer (index, really!). - * Bit fields: - * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. - * NOTE: For 256-entry circular buffer, use only bits [7:0]. - */ -#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) -#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) - - -/** - * Rx Config/Status Registers (RCSR) - * Rx Config Reg for channel 0 (only channel used) - * - * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for - * normal operation (see bit fields). - * - * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. - * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for - * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. - * - * Bit fields: - * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, - * '10' operate normally - * 29-24: reserved - * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), - * min "5" for 32 RBDs, max "12" for 4096 RBDs. - * 19-18: reserved - * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, - * '10' 12K, '11' 16K. 
- * 15-14: reserved - * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) - * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) - * typical value 0x10 (about 1/2 msec) - * 3- 0: reserved - */ -#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) -#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) -#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) - -#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) - -#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ -#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ -#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ -#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ -#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ -#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ - -#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) -#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) -#define RX_RB_TIMEOUT (0x10) - -#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) -#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) -#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) - -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) - -#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) -#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) -#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) - -#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */ - -/** - * Rx Shared Status Registers (RSSR) - * - * After stopping Rx DMA channel (writing 0 to - * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll - * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. - * - * Bit fields: - * 24: 1 = Channel 0 is idle - * - * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV - * contain default values that should not be altered by the driver. - */ -#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) -#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) - -#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) -#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) -#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ - (FH_MEM_RSSR_LOWER_BOUND + 0x008) - -#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) - -#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 - -/* TFDB Area - TFDs buffer table */ -#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) -#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) -#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) -#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) -#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) - -/** - * Transmit DMA Channel Control/Status Registers (TCSR) - * - * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels - * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, - * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. - * - * To use a Tx DMA channel, driver must initialize its - * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: - * - * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL - * - * All other bits should be 0. 
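A sketch of that initialization, using the macros defined just below (note the value macro there is spelled FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE, without the _VAL suffix used in the text above). The write accessor name is assumed; the bit-field breakdown follows.

    /* Sketch only (iwl_legacy_write_direct32() is an assumed accessor name):
     * bring Tx DMA/FIFO channel 'chnl' up in normal operating mode with DMA
     * credits enabled, leaving every other bit zero as required above. */
    static void example_enable_tx_chnl(struct iwl_priv *priv, int chnl)
    {
            iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                            FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                            FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
    }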
- * - * Bit fields: - * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, - * '10' operate normally - * 29- 4: Reserved, set to "0" - * 3: Enable internal DMA requests (1, normal operation), disable (0) - * 2- 0: Reserved, set to "0" - */ -#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) -#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) - -/* Find Control/Status reg for given Tx DMA/FIFO channel */ -#define FH49_TCSR_CHNL_NUM (7) -#define FH50_TCSR_CHNL_NUM (8) - -/* TCSR: tx_config register values */ -#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ - (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) -#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ - (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) -#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ - (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) - -#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) - -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) - -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) - -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) - -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) - -#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) -#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) -#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) - -#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) -#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) - -/** - * Tx Shared Status Registers (TSSR) - * - * After stopping Tx DMA channel (writing 0 to - * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll - * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle - * (channel's buffers empty | no pending requests). - * - * Bit fields: - * 31-24: 1 = Channel buffers empty (channel 7:0) - * 23-16: 1 = No pending requests (channel 7:0) - */ -#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) -#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) - -#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) - -/** - * Bit fields for TSSR(Tx Shared Status & Control) error status register: - * 31: Indicates an address error when accessed to internal memory - * uCode/driver must write "1" in order to clear this flag - * 30: Indicates that Host did not send the expected number of dwords to FH - * uCode/driver must write "1" in order to clear this flag - * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA - * command was received from the scheduler while the TRB was already full - * with previous command - * uCode/driver must write "1" in order to clear this flag - * 7-0: Each status bit indicates a channel's TxCredit error. When an error - * bit is set, it indicates that the FH has received a full indication - * from the RTC TxFIFO and the current value of the TxCredit counter was - * not equal to zero. 
This mean that the credit mechanism was not - * synchronized to the TxFIFO status - * uCode/driver must write "1" in order to clear this flag - */ -#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) - -#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) - -/* Tx service channels */ -#define FH_SRVC_CHNL (9) -#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) -#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) -#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ - (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) - -#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) -/* Instruct FH to increment the retry count of a packet when - * it is brought from the memory to TX-FIFO - */ -#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) - -#define RX_QUEUE_SIZE 256 -#define RX_QUEUE_MASK 255 -#define RX_QUEUE_SIZE_LOG 8 - -/* - * RX related structures and functions - */ -#define RX_FREE_BUFFERS 64 -#define RX_LOW_WATERMARK 8 - -/* Size of one Rx buffer in host DRAM */ -#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */ -#define IWL_RX_BUF_SIZE_4K (4 * 1024) -#define IWL_RX_BUF_SIZE_8K (8 * 1024) - -/** - * struct iwl_rb_status - reseve buffer status - * host memory mapped FH registers - * @closed_rb_num [0:11] - Indicates the index of the RB which was closed - * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed - * @finished_rb_num [0:11] - Indicates the index of the current RB - * in which the last frame was written to - * @finished_fr_num [0:11] - Indicates the index of the RX Frame - * which was transferred - */ -struct iwl_rb_status { - __le16 closed_rb_num; - __le16 closed_fr_num; - __le16 finished_rb_num; - __le16 finished_fr_nam; - __le32 __unused; /* 3945 only */ -} __packed; - - -#define TFD_QUEUE_SIZE_MAX (256) -#define TFD_QUEUE_SIZE_BC_DUP (64) -#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) -#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) -#define IWL_NUM_OF_TBS 20 - -static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr) -{ - return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF; -} -/** - * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor - * - * This structure contains dma address and length of transmission address - * - * @lo: low [31:0] portion of the dma address of TX buffer - * every even is unaligned on 16 bit boundary - * @hi_n_len 0-3 [35:32] portion of dma - * 4-15 length of the tx buffer - */ -struct iwl_tfd_tb { - __le32 lo; - __le16 hi_n_len; -} __packed; - -/** - * struct iwl_tfd - * - * Transmit Frame Descriptor (TFD) - * - * @ __reserved1[3] reserved - * @ num_tbs 0-4 number of active tbs - * 5 reserved - * 6-7 padding (not used) - * @ tbs[20] transmit frame buffer descriptors - * @ __pad padding - * - * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. - * Both driver and device share these circular buffers, each of which must be - * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes - * - * Driver must indicate the physical address of the base of each - * circular buffer via the FH_MEM_CBBC_QUEUE registers. - * - * Each TFD contains pointer/size information for up to 20 data buffers - * in host DRAM. These buffers collectively contain the (one) frame described - * by the TFD. Each buffer must be a single contiguous block of memory within - * itself, but buffers may be scattered in host DRAM. Each buffer has max size - * of (4K - 4). 
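A packing sketch that mirrors the iwl_tfd_tb layout documented above (the struct iwl_tfd definition itself follows below): bits [35:32] of the DMA address go into the low nibble of hi_n_len via iwl_legacy_get_dma_hi_addr(), the buffer length into bits 4-15, and the low 32 address bits into lo. The helper name is invented and the num_tbs update is simplified for illustration.

    /* Illustration only: append one DMA buffer to a TFD by filling the
     * next iwl_tfd_tb slot according to the layout documented above. */
    static void example_tfd_fill_tb(struct iwl_tfd *tfd, u8 idx,
                                    dma_addr_t addr, u16 len)
    {
            struct iwl_tfd_tb *tb = &tfd->tbs[idx];
            u16 hi_n_len = iwl_legacy_get_dma_hi_addr(addr);

            hi_n_len |= len << 4;                   /* length in bits 4-15  */

            tb->lo = cpu_to_le32(addr & 0xffffffff);
            tb->hi_n_len = cpu_to_le16(hi_n_len);

            tfd->num_tbs = idx + 1;                 /* simplified TB count  */
    }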
The concatenates all of a TFD's buffers into a single - * Tx frame, up to 8 KBytes in size. - * - * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. - */ -struct iwl_tfd { - u8 __reserved1[3]; - u8 num_tbs; - struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; - __le32 __pad; -} __packed; - -/* Keep Warm Size */ -#define IWL_KW_SIZE 0x1000 /* 4k */ - -#endif /* !__iwl_legacy_fh_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c deleted file mode 100644 index ce1fc9feb61..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ /dev/null @@ -1,271 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <net/mac80211.h> - -#include "iwl-dev.h" -#include "iwl-debug.h" -#include "iwl-eeprom.h" -#include "iwl-core.h" - - -const char *iwl_legacy_get_cmd_string(u8 cmd) -{ - switch (cmd) { - IWL_CMD(REPLY_ALIVE); - IWL_CMD(REPLY_ERROR); - IWL_CMD(REPLY_RXON); - IWL_CMD(REPLY_RXON_ASSOC); - IWL_CMD(REPLY_QOS_PARAM); - IWL_CMD(REPLY_RXON_TIMING); - IWL_CMD(REPLY_ADD_STA); - IWL_CMD(REPLY_REMOVE_STA); - IWL_CMD(REPLY_WEPKEY); - IWL_CMD(REPLY_3945_RX); - IWL_CMD(REPLY_TX); - IWL_CMD(REPLY_RATE_SCALE); - IWL_CMD(REPLY_LEDS_CMD); - IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); - IWL_CMD(REPLY_CHANNEL_SWITCH); - IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); - IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); - IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); - IWL_CMD(POWER_TABLE_CMD); - IWL_CMD(PM_SLEEP_NOTIFICATION); - IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); - IWL_CMD(REPLY_SCAN_CMD); - IWL_CMD(REPLY_SCAN_ABORT_CMD); - IWL_CMD(SCAN_START_NOTIFICATION); - IWL_CMD(SCAN_RESULTS_NOTIFICATION); - IWL_CMD(SCAN_COMPLETE_NOTIFICATION); - IWL_CMD(BEACON_NOTIFICATION); - IWL_CMD(REPLY_TX_BEACON); - IWL_CMD(REPLY_TX_PWR_TABLE_CMD); - IWL_CMD(REPLY_BT_CONFIG); - IWL_CMD(REPLY_STATISTICS_CMD); - IWL_CMD(STATISTICS_NOTIFICATION); - IWL_CMD(CARD_STATE_NOTIFICATION); - IWL_CMD(MISSED_BEACONS_NOTIFICATION); - IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); - IWL_CMD(SENSITIVITY_CMD); - IWL_CMD(REPLY_PHY_CALIBRATION_CMD); - IWL_CMD(REPLY_RX_PHY_CMD); - IWL_CMD(REPLY_RX_MPDU_CMD); - IWL_CMD(REPLY_RX); - IWL_CMD(REPLY_COMPRESSED_BA); - default: - return "UNKNOWN"; - - } -} -EXPORT_SYMBOL(iwl_legacy_get_cmd_string); - -#define HOST_COMPLETE_TIMEOUT (HZ / 2) - -static void 
iwl_legacy_generic_cmd_callback(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt) -{ - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from %s (0x%08X)\n", - iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - return; - } - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - switch (cmd->hdr.cmd) { - case REPLY_TX_LINK_QUALITY_CMD: - case SENSITIVITY_CMD: - IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", - iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - break; - default: - IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", - iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - } -#endif -} - -static int -iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - int ret; - - BUG_ON(!(cmd->flags & CMD_ASYNC)); - - /* An asynchronous command can not expect an SKB to be set. */ - BUG_ON(cmd->flags & CMD_WANT_SKB); - - /* Assign a generic callback if one is not provided */ - if (!cmd->callback) - cmd->callback = iwl_legacy_generic_cmd_callback; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EBUSY; - - ret = iwl_legacy_enqueue_hcmd(priv, cmd); - if (ret < 0) { - IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", - iwl_legacy_get_cmd_string(cmd->id), ret); - return ret; - } - return 0; -} - -int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - int cmd_idx; - int ret; - - lockdep_assert_held(&priv->mutex); - - BUG_ON(cmd->flags & CMD_ASYNC); - - /* A synchronous command can not have a callback set. */ - BUG_ON(cmd->callback); - - IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", - iwl_legacy_get_cmd_string(cmd->id)); - - set_bit(STATUS_HCMD_ACTIVE, &priv->status); - IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", - iwl_legacy_get_cmd_string(cmd->id)); - - cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd); - if (cmd_idx < 0) { - ret = cmd_idx; - IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", - iwl_legacy_get_cmd_string(cmd->id), ret); - goto out; - } - - ret = wait_event_timeout(priv->wait_command_queue, - !test_bit(STATUS_HCMD_ACTIVE, &priv->status), - HOST_COMPLETE_TIMEOUT); - if (!ret) { - if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { - IWL_ERR(priv, - "Error sending %s: time out after %dms.\n", - iwl_legacy_get_cmd_string(cmd->id), - jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - clear_bit(STATUS_HCMD_ACTIVE, &priv->status); - IWL_DEBUG_INFO(priv, - "Clearing HCMD_ACTIVE for command %s\n", - iwl_legacy_get_cmd_string(cmd->id)); - ret = -ETIMEDOUT; - goto cancel; - } - } - - if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { - IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", - iwl_legacy_get_cmd_string(cmd->id)); - ret = -ECANCELED; - goto fail; - } - if (test_bit(STATUS_FW_ERROR, &priv->status)) { - IWL_ERR(priv, "Command %s failed: FW Error\n", - iwl_legacy_get_cmd_string(cmd->id)); - ret = -EIO; - goto fail; - } - if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { - IWL_ERR(priv, "Error: Response NULL in '%s'\n", - iwl_legacy_get_cmd_string(cmd->id)); - ret = -EIO; - goto cancel; - } - - ret = 0; - goto out; - -cancel: - if (cmd->flags & CMD_WANT_SKB) { - /* - * Cancel the CMD_WANT_SKB flag for the cmd in the - * TX cmd queue. Otherwise in case the cmd comes - * in later, it will possibly set an invalid - * address (cmd->meta.source). 
- */ - priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= - ~CMD_WANT_SKB; - } -fail: - if (cmd->reply_page) { - iwl_legacy_free_pages(priv, cmd->reply_page); - cmd->reply_page = 0; - } -out: - return ret; -} -EXPORT_SYMBOL(iwl_legacy_send_cmd_sync); - -int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - if (cmd->flags & CMD_ASYNC) - return iwl_legacy_send_cmd_async(priv, cmd); - - return iwl_legacy_send_cmd_sync(priv, cmd); -} -EXPORT_SYMBOL(iwl_legacy_send_cmd); - -int -iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = len, - .data = data, - }; - - return iwl_legacy_send_cmd_sync(priv, &cmd); -} -EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu); - -int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, - u8 id, u16 len, const void *data, - void (*callback)(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt)) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = len, - .data = data, - }; - - cmd.flags |= CMD_ASYNC; - cmd.callback = callback; - - return iwl_legacy_send_cmd_async(priv, &cmd); -} -EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async); diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h deleted file mode 100644 index 5cf23eaecbb..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-helpers.h +++ /dev/null @@ -1,196 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_legacy_helpers_h__ -#define __iwl_legacy_helpers_h__ - -#include <linux/ctype.h> -#include <net/mac80211.h> - -#include "iwl-io.h" - -#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) - - -static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf( - struct ieee80211_hw *hw) -{ - return &hw->conf; -} - -/** - * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning - * @index -- current index - * @n_bd -- total number of entries in queue (must be power of 2) - */ -static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd) -{ - return ++index & (n_bd - 1); -} - -/** - * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end - * @index -- current index - * @n_bd -- total number of entries in queue (must be power of 2) - */ -static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd) -{ - return --index & (n_bd - 1); -} - -/* TODO: Move fw_desc functions to iwl-pci.ko */ -static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev, - struct fw_desc *desc) -{ - if (desc->v_addr) - dma_free_coherent(&pci_dev->dev, desc->len, - desc->v_addr, desc->p_addr); - desc->v_addr = NULL; - desc->len = 0; -} - -static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev, - struct fw_desc *desc) -{ - if (!desc->len) { - desc->v_addr = NULL; - return -EINVAL; - } - - desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, - &desc->p_addr, GFP_KERNEL); - return (desc->v_addr != NULL) ? 0 : -ENOMEM; -} - -/* - * we have 8 bits used like this: - * - * 7 6 5 4 3 2 1 0 - * | | | | | | | | - * | | | | | | +-+-------- AC queue (0-3) - * | | | | | | - * | +-+-+-+-+------------ HW queue ID - * | - * +---------------------- unused - */ -static inline void -iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) -{ - BUG_ON(ac > 3); /* only have 2 bits */ - BUG_ON(hwq > 31); /* only use 5 bits */ - - txq->swq_id = (hwq << 2) | ac; -} - -static inline void iwl_legacy_wake_queue(struct iwl_priv *priv, - struct iwl_tx_queue *txq) -{ - u8 queue = txq->swq_id; - u8 ac = queue & 3; - u8 hwq = (queue >> 2) & 0x1f; - - if (test_and_clear_bit(hwq, priv->queue_stopped)) - if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) - ieee80211_wake_queue(priv->hw, ac); -} - -static inline void iwl_legacy_stop_queue(struct iwl_priv *priv, - struct iwl_tx_queue *txq) -{ - u8 queue = txq->swq_id; - u8 ac = queue & 3; - u8 hwq = (queue >> 2) & 0x1f; - - if (!test_and_set_bit(hwq, priv->queue_stopped)) - if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) - ieee80211_stop_queue(priv->hw, ac); -} - -#ifdef ieee80211_stop_queue -#undef ieee80211_stop_queue -#endif - -#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue - -#ifdef ieee80211_wake_queue -#undef ieee80211_wake_queue -#endif - -#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue - -static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv) -{ - clear_bit(STATUS_INT_ENABLED, &priv->status); - - /* disable interrupts from uCode/NIC to host */ - iwl_write32(priv, CSR_INT_MASK, 0x00000000); - - /* acknowledge/clear/reset any interrupts still pending - * from uCode or flow handler (Rx/Tx DMA) */ - iwl_write32(priv, CSR_INT, 0xffffffff); - iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); - IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); -} - -static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv 
*priv) -{ - IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n"); - iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); -} - -static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv) -{ - IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); - set_bit(STATUS_INT_ENABLED, &priv->status); - iwl_write32(priv, CSR_INT_MASK, priv->inta_mask); -} - -/** - * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time - * @priv -- pointer to iwl_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv, - u16 tsf_bits) -{ - return (1 << tsf_bits) - 1; -} - -/** - * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time - * @priv -- pointer to iwl_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv, - u16 tsf_bits) -{ - return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; -} - -#endif /* __iwl_legacy_helpers_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h deleted file mode 100644 index 5cc5d342914..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-io.h +++ /dev/null @@ -1,545 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_legacy_io_h__ -#define __iwl_legacy_io_h__ - -#include <linux/io.h> - -#include "iwl-dev.h" -#include "iwl-debug.h" -#include "iwl-devtrace.h" - -/* - * IO, register, and NIC memory access functions - * - * NOTE on naming convention and macro usage for these - * - * A single _ prefix before a an access function means that no state - * check or debug information is printed when that function is called. - * - * A double __ prefix before an access function means that state is checked - * and the current line number and caller function name are printed in addition - * to any other debug output. - * - * The non-prefixed name is the #define that maps the caller into a - * #define that provides the caller's name and __LINE__ to the double - * prefix version. - * - * If you wish to call the function without any debug or state checking, - * you should use the single _ prefix version (as is used by dependent IO - * routines, for example _iwl_legacy_read_direct32 calls the non-check version of - * _iwl_legacy_read32.) 
- * - * These declarations are *extremely* useful in quickly isolating code deltas - * which result in misconfiguration of the hardware I/O. In combination with - * git-bisect and the IO debug level you can quickly determine the specific - * commit which breaks the IO sequence to the hardware. - * - */ - -static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val) -{ - trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val); - iowrite8(val, priv->hw_base + ofs); -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline void -__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv, - u32 ofs, u8 val) -{ - IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l); - _iwl_legacy_write8(priv, ofs, val); -} -#define iwl_write8(priv, ofs, val) \ - __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val) -#else -#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val) -#endif - - -static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val) -{ - trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val); - iowrite32(val, priv->hw_base + ofs); -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline void -__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv, - u32 ofs, u32 val) -{ - IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); - _iwl_legacy_write32(priv, ofs, val); -} -#define iwl_write32(priv, ofs, val) \ - __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val) -#else -#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val) -#endif - -static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs) -{ - u32 val = ioread32(priv->hw_base + ofs); - trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val); - return val; -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline u32 -__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) -{ - IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l); - return _iwl_legacy_read32(priv, ofs); -} -#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs) -#else -#define iwl_read32(p, o) _iwl_legacy_read32(p, o) -#endif - -#define IWL_POLL_INTERVAL 10 /* microseconds */ -static inline int -_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr, - u32 bits, u32 mask, int timeout) -{ - int t = 0; - - do { - if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask)) - return t; - udelay(IWL_POLL_INTERVAL); - t += IWL_POLL_INTERVAL; - } while (t < timeout); - - return -ETIMEDOUT; -} -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline int __iwl_legacy_poll_bit(const char *f, u32 l, - struct iwl_priv *priv, u32 addr, - u32 bits, u32 mask, int timeout) -{ - int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout); - IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", - addr, bits, mask, - unlikely(ret == -ETIMEDOUT) ? 
"timeout" : "", f, l); - return ret; -} -#define iwl_poll_bit(priv, addr, bits, mask, timeout) \ - __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \ - bits, mask, timeout) -#else -#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t) -#endif - -static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) -{ - _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask); -} -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline void __iwl_legacy_set_bit(const char *f, u32 l, - struct iwl_priv *priv, u32 reg, u32 mask) -{ - u32 val = _iwl_legacy_read32(priv, reg) | mask; - IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, - mask, val); - _iwl_legacy_write32(priv, reg, val); -} -static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&p->reg_lock, reg_flags); - __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m); - spin_unlock_irqrestore(&p->reg_lock, reg_flags); -} -#else -static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&p->reg_lock, reg_flags); - _iwl_legacy_set_bit(p, r, m); - spin_unlock_irqrestore(&p->reg_lock, reg_flags); -} -#endif - -static inline void -_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) -{ - _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask); -} -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline void -__iwl_legacy_clear_bit(const char *f, u32 l, - struct iwl_priv *priv, u32 reg, u32 mask) -{ - u32 val = _iwl_legacy_read32(priv, reg) & ~mask; - IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); - _iwl_legacy_write32(priv, reg, val); -} -static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&p->reg_lock, reg_flags); - __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m); - spin_unlock_irqrestore(&p->reg_lock, reg_flags); -} -#else -static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&p->reg_lock, reg_flags); - _iwl_legacy_clear_bit(p, r, m); - spin_unlock_irqrestore(&p->reg_lock, reg_flags); -} -#endif - -static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv) -{ - int ret; - u32 val; - - /* this bit wakes up the NIC */ - _iwl_legacy_set_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* - * These bits say the device is running, and should keep running for - * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), - * but they do not indicate that embedded SRAM is restored yet; - * 3945 and 4965 have volatile SRAM, and must save/restore contents - * to/from host DRAM when sleeping/waking for power-saving. - * Each direction takes approximately 1/4 millisecond; with this - * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a - * series of register accesses are expected (e.g. reading Event Log), - * to keep device from sleeping. - * - * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that - * SRAM is okay/restored. We don't check that here because this call - * is just for hardware register access; but GP1 MAC_SLEEP check is a - * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 
- * - */ - ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); - if (ret < 0) { - val = _iwl_legacy_read32(priv, CSR_GP_CNTRL); - IWL_ERR(priv, - "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); - _iwl_legacy_write32(priv, CSR_RESET, - CSR_RESET_REG_FLAG_FORCE_NMI); - return -EIO; - } - - return 0; -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l, - struct iwl_priv *priv) -{ - IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l); - return _iwl_legacy_grab_nic_access(priv); -} -#define iwl_grab_nic_access(priv) \ - __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv) -#else -#define iwl_grab_nic_access(priv) \ - _iwl_legacy_grab_nic_access(priv) -#endif - -static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv) -{ - _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); -} -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline void __iwl_legacy_release_nic_access(const char *f, u32 l, - struct iwl_priv *priv) -{ - - IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l); - _iwl_legacy_release_nic_access(priv); -} -#define iwl_release_nic_access(priv) \ - __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv) -#else -#define iwl_release_nic_access(priv) \ - _iwl_legacy_release_nic_access(priv) -#endif - -static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) -{ - return _iwl_legacy_read32(priv, reg); -} -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l, - struct iwl_priv *priv, u32 reg) -{ - u32 value = _iwl_legacy_read_direct32(priv, reg); - IWL_DEBUG_IO(priv, - "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value, - f, l); - return value; -} -static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) -{ - u32 value; - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg); - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); - return value; -} - -#else -static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) -{ - u32 value; - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - value = _iwl_legacy_read_direct32(priv, reg); - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); - return value; - -} -#endif - -static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv, - u32 reg, u32 value) -{ - _iwl_legacy_write32(priv, reg, value); -} -static inline void -iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - if (!iwl_grab_nic_access(priv)) { - _iwl_legacy_write_direct32(priv, reg, value); - iwl_release_nic_access(priv); - } - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} - -static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv, - u32 reg, u32 len, u32 *values) -{ - u32 count = sizeof(u32); - - if ((priv != NULL) && (values != NULL)) { - for (; 0 < len; len -= count, reg += count, values++) - iwl_legacy_write_direct32(priv, reg, *values); - } -} - -static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr, - u32 mask, int timeout) -{ - int t = 0; - - do { - if 
((iwl_legacy_read_direct32(priv, addr) & mask) == mask) - return t; - udelay(IWL_POLL_INTERVAL); - t += IWL_POLL_INTERVAL; - } while (t < timeout); - - return -ETIMEDOUT; -} - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l, - struct iwl_priv *priv, - u32 addr, u32 mask, int timeout) -{ - int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout); - - if (unlikely(ret == -ETIMEDOUT)) - IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - " - "timedout - %s %d\n", addr, mask, f, l); - else - IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X " - "- %s %d\n", addr, mask, ret, f, l); - return ret; -} -#define iwl_poll_direct_bit(priv, addr, mask, timeout) \ -__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout) -#else -#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit -#endif - -static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) -{ - _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); - rmb(); - return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT); -} -static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) -{ - unsigned long reg_flags; - u32 val; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - val = _iwl_legacy_read_prph(priv, reg); - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); - return val; -} - -static inline void _iwl_legacy_write_prph(struct iwl_priv *priv, - u32 addr, u32 val) -{ - _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR, - ((addr & 0x0000FFFF) | (3 << 24))); - wmb(); - _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); -} - -static inline void -iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - if (!iwl_grab_nic_access(priv)) { - _iwl_legacy_write_prph(priv, addr, val); - iwl_release_nic_access(priv); - } - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} - -#define _iwl_legacy_set_bits_prph(priv, reg, mask) \ -_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask)) - -static inline void -iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - _iwl_legacy_set_bits_prph(priv, reg, mask); - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} - -#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \ -_iwl_legacy_write_prph(priv, reg, \ - ((_iwl_legacy_read_prph(priv, reg) & mask) | bits)) - -static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, - u32 bits, u32 mask) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask); - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} - -static inline void iwl_legacy_clear_bits_prph(struct iwl_priv - *priv, u32 reg, u32 mask) -{ - unsigned long reg_flags; - u32 val; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - val = _iwl_legacy_read_prph(priv, reg); - _iwl_legacy_write_prph(priv, reg, (val & ~mask)); - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} - -static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr) -{ - unsigned long reg_flags; - u32 
value; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - iwl_grab_nic_access(priv); - - _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); - rmb(); - value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); - - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); - return value; -} - -static inline void -iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - if (!iwl_grab_nic_access(priv)) { - _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); - wmb(); - _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); - iwl_release_nic_access(priv); - } - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} - -static inline void -iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr, - u32 len, u32 *values) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&priv->reg_lock, reg_flags); - if (!iwl_grab_nic_access(priv)) { - _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); - wmb(); - for (; 0 < len; len -= sizeof(u32), values++) - _iwl_legacy_write_direct32(priv, - HBUS_TARG_MEM_WDAT, *values); - - iwl_release_nic_access(priv); - } - spin_unlock_irqrestore(&priv->reg_lock, reg_flags); -} -#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c deleted file mode 100644 index dc568a474c5..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-led.c +++ /dev/null @@ -1,205 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" - -/* default: IWL_LED_BLINK(0) using blinking index table */ -static int led_mode; -module_param(led_mode, int, S_IRUGO); -MODULE_PARM_DESC(led_mode, "0=system default, " - "1=On(RF On)/Off(RF Off), 2=blinking"); - -/* Throughput OFF time(ms) ON time (ms) - * >300 25 25 - * >200 to 300 40 40 - * >100 to 200 55 55 - * >70 to 100 65 65 - * >50 to 70 75 75 - * >20 to 50 85 85 - * >10 to 20 95 95 - * >5 to 10 110 110 - * >1 to 5 130 130 - * >0 to 1 167 167 - * <=0 SOLID ON - */ -static const struct ieee80211_tpt_blink iwl_blink[] = { - { .throughput = 0, .blink_time = 334 }, - { .throughput = 1 * 1024 - 1, .blink_time = 260 }, - { .throughput = 5 * 1024 - 1, .blink_time = 220 }, - { .throughput = 10 * 1024 - 1, .blink_time = 190 }, - { .throughput = 20 * 1024 - 1, .blink_time = 170 }, - { .throughput = 50 * 1024 - 1, .blink_time = 150 }, - { .throughput = 70 * 1024 - 1, .blink_time = 130 }, - { .throughput = 100 * 1024 - 1, .blink_time = 110 }, - { .throughput = 200 * 1024 - 1, .blink_time = 80 }, - { .throughput = 300 * 1024 - 1, .blink_time = 50 }, -}; - -/* - * Adjust led blink rate to compensate on a MAC Clock difference on every HW - * Led blink rate analysis showed an average deviation of 0% on 3945, - * 5% on 4965 HW. - * Need to compensate on the led on/off time per HW according to the deviation - * to achieve the desired led frequency - * The calculation is: (100-averageDeviation)/100 * blinkTime - * For code efficiency the calculation will be: - * compensation = (100 - averageDeviation) * 64 / 100 - * NewBlinkTime = (compensation * BlinkTime) / 64 - */ -static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv, - u8 time, u16 compensation) -{ - if (!compensation) { - IWL_ERR(priv, "undefined blink compensation: " - "use pre-defined blinking time\n"); - return time; - } - - return (u8)((time * compensation) >> 6); -} - -/* Set led pattern command */ -static int iwl_legacy_led_cmd(struct iwl_priv *priv, - unsigned long on, - unsigned long off) -{ - struct iwl_led_cmd led_cmd = { - .id = IWL_LED_LINK, - .interval = IWL_DEF_LED_INTRVL - }; - int ret; - - if (!test_bit(STATUS_READY, &priv->status)) - return -EBUSY; - - if (priv->blink_on == on && priv->blink_off == off) - return 0; - - if (off == 0) { - /* led is SOLID_ON */ - on = IWL_LED_SOLID; - } - - IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", - priv->cfg->base_params->led_compensation); - led_cmd.on = iwl_legacy_blink_compensation(priv, on, - priv->cfg->base_params->led_compensation); - led_cmd.off = iwl_legacy_blink_compensation(priv, off, - priv->cfg->base_params->led_compensation); - - ret = priv->cfg->ops->led->cmd(priv, &led_cmd); - if (!ret) { - priv->blink_on = on; - priv->blink_off = off; - } - return ret; -} - -static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); - unsigned long on = 0; - - if (brightness > 0) - on = IWL_LED_SOLID; - - iwl_legacy_led_cmd(priv, on, 0); -} - 
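Editor's note: the LED blink compensation removed above reduces to fixed-point integer arithmetic, per the comment in the deleted code (compensation = (100 - averageDeviation) * 64 / 100, NewBlinkTime = (compensation * BlinkTime) / 64, implemented as "(time * compensation) >> 6"). The following is a minimal, self-contained sketch of that calculation only; the helper names and the use of plain stdint types instead of the driver's iwl_priv plumbing are illustrative assumptions, not part of the original source.

#include <stdint.h>

/* compensation = (100 - averageDeviation) * 64 / 100, as in the deleted comment */
static inline uint16_t led_blink_compensation_factor(uint8_t avg_deviation_pct)
{
	return (uint16_t)(((100u - avg_deviation_pct) * 64u) / 100u);
}

/* NewBlinkTime = (compensation * BlinkTime) / 64, i.e. "(time * compensation) >> 6";
 * a zero compensation factor is treated as undefined and the pre-defined time is kept. */
static inline uint8_t led_compensate_blink_time(uint8_t time_ms, uint16_t compensation)
{
	if (!compensation)
		return time_ms;
	return (uint8_t)(((uint32_t)time_ms * compensation) >> 6);
}

/* Example: the 5% average deviation quoted for 4965-class hardware gives a
 * factor of (100 - 5) * 64 / 100 = 60, so a 110 ms blink interval would be
 * programmed as (110 * 60) >> 6 = 103 ms. */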
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev, - unsigned long *delay_on, - unsigned long *delay_off) -{ - struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); - - return iwl_legacy_led_cmd(priv, *delay_on, *delay_off); -} - -void iwl_legacy_leds_init(struct iwl_priv *priv) -{ - int mode = led_mode; - int ret; - - if (mode == IWL_LED_DEFAULT) - mode = priv->cfg->led_mode; - - priv->led.name = kasprintf(GFP_KERNEL, "%s-led", - wiphy_name(priv->hw->wiphy)); - priv->led.brightness_set = iwl_legacy_led_brightness_set; - priv->led.blink_set = iwl_legacy_led_blink_set; - priv->led.max_brightness = 1; - - switch (mode) { - case IWL_LED_DEFAULT: - WARN_ON(1); - break; - case IWL_LED_BLINK: - priv->led.default_trigger = - ieee80211_create_tpt_led_trigger(priv->hw, - IEEE80211_TPT_LEDTRIG_FL_CONNECTED, - iwl_blink, ARRAY_SIZE(iwl_blink)); - break; - case IWL_LED_RF_STATE: - priv->led.default_trigger = - ieee80211_get_radio_led_name(priv->hw); - break; - } - - ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); - if (ret) { - kfree(priv->led.name); - return; - } - - priv->led_registered = true; -} -EXPORT_SYMBOL(iwl_legacy_leds_init); - -void iwl_legacy_leds_exit(struct iwl_priv *priv) -{ - if (!priv->led_registered) - return; - - led_classdev_unregister(&priv->led); - kfree(priv->led.name); -} -EXPORT_SYMBOL(iwl_legacy_leds_exit); diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h deleted file mode 100644 index f0791f70f79..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-led.h +++ /dev/null @@ -1,56 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_legacy_leds_h__ -#define __iwl_legacy_leds_h__ - - -struct iwl_priv; - -#define IWL_LED_SOLID 11 -#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) - -#define IWL_LED_ACTIVITY (0<<1) -#define IWL_LED_LINK (1<<1) - -/* - * LED mode - * IWL_LED_DEFAULT: use device default - * IWL_LED_RF_STATE: turn LED on/off based on RF state - * LED ON = RF ON - * LED OFF = RF OFF - * IWL_LED_BLINK: adjust led blink rate based on blink table - */ -enum iwl_led_mode { - IWL_LED_DEFAULT, - IWL_LED_RF_STATE, - IWL_LED_BLINK, -}; - -void iwl_legacy_leds_init(struct iwl_priv *priv); -void iwl_legacy_leds_exit(struct iwl_priv *priv); - -#endif /* __iwl_legacy_leds_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h deleted file mode 100644 index 38647e481eb..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h +++ /dev/null @@ -1,456 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_legacy_rs_h__ -#define __iwl_legacy_rs_h__ - -struct iwl_rate_info { - u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ - u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ - u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ - u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ - u8 prev_ieee; /* previous rate in IEEE speeds */ - u8 next_ieee; /* next rate in IEEE speeds */ - u8 prev_rs; /* previous rate used in rs algo */ - u8 next_rs; /* next rate used in rs algo */ - u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ - u8 next_rs_tgg; /* next rate used in TGG rs algo */ -}; - -struct iwl3945_rate_info { - u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ - u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ - u8 prev_ieee; /* previous rate in IEEE speeds */ - u8 next_ieee; /* next rate in IEEE speeds */ - u8 prev_rs; /* previous rate used in rs algo */ - u8 next_rs; /* next rate used in rs algo */ - u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ - u8 next_rs_tgg; /* next rate used in TGG rs algo */ - u8 table_rs_index; /* index in rate scale table cmd */ - u8 prev_table_rs; /* prev in rate table cmd */ -}; - - -/* - * These serve as indexes into - * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT]; - */ -enum { - IWL_RATE_1M_INDEX = 0, - IWL_RATE_2M_INDEX, - IWL_RATE_5M_INDEX, - IWL_RATE_11M_INDEX, - IWL_RATE_6M_INDEX, - IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, - IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, - IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, - IWL_RATE_54M_INDEX, - IWL_RATE_60M_INDEX, - IWL_RATE_COUNT, - IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ - IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1, - IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, - IWL_RATE_INVALID = IWL_RATE_COUNT, -}; - -enum { - IWL_RATE_6M_INDEX_TABLE = 0, - IWL_RATE_9M_INDEX_TABLE, - IWL_RATE_12M_INDEX_TABLE, - IWL_RATE_18M_INDEX_TABLE, - IWL_RATE_24M_INDEX_TABLE, - IWL_RATE_36M_INDEX_TABLE, - IWL_RATE_48M_INDEX_TABLE, - IWL_RATE_54M_INDEX_TABLE, - IWL_RATE_1M_INDEX_TABLE, - IWL_RATE_2M_INDEX_TABLE, - IWL_RATE_5M_INDEX_TABLE, - IWL_RATE_11M_INDEX_TABLE, - IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, -}; - -enum { - IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, - IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, - IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, - IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, - IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, -}; - -/* #define vs. enum to keep from defaulting to 'large integer' */ -#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) -#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) -#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) -#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) -#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) -#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) -#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) -#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) -#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) -#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) -#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) -#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) -#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) - -/* uCode API values for legacy bit rates, both OFDM and CCK */ -enum { - IWL_RATE_6M_PLCP = 13, - IWL_RATE_9M_PLCP = 15, - IWL_RATE_12M_PLCP = 5, - IWL_RATE_18M_PLCP = 7, - IWL_RATE_24M_PLCP = 9, - IWL_RATE_36M_PLCP = 11, - IWL_RATE_48M_PLCP = 1, - IWL_RATE_54M_PLCP = 3, - IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ - IWL_RATE_1M_PLCP = 10, - IWL_RATE_2M_PLCP = 20, - IWL_RATE_5M_PLCP = 55, - IWL_RATE_11M_PLCP = 110, - /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ -}; - -/* uCode API values for OFDM high-throughput (HT) bit rates */ -enum { - IWL_RATE_SISO_6M_PLCP = 0, - IWL_RATE_SISO_12M_PLCP = 1, - IWL_RATE_SISO_18M_PLCP = 2, - IWL_RATE_SISO_24M_PLCP = 3, - IWL_RATE_SISO_36M_PLCP = 4, - IWL_RATE_SISO_48M_PLCP = 5, - IWL_RATE_SISO_54M_PLCP = 6, - IWL_RATE_SISO_60M_PLCP = 7, - IWL_RATE_MIMO2_6M_PLCP = 0x8, - IWL_RATE_MIMO2_12M_PLCP = 0x9, - IWL_RATE_MIMO2_18M_PLCP = 0xa, - IWL_RATE_MIMO2_24M_PLCP = 0xb, - IWL_RATE_MIMO2_36M_PLCP = 0xc, - IWL_RATE_MIMO2_48M_PLCP = 0xd, - IWL_RATE_MIMO2_54M_PLCP = 0xe, - IWL_RATE_MIMO2_60M_PLCP = 0xf, - IWL_RATE_SISO_INVM_PLCP, - IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, -}; - 
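Editor's note: the per-rate masks defined above are one-bit encodings of the corresponding rate indexes (IWL_RATE_xM_MASK is 1 << IWL_RATE_xM_INDEX), so a supported-rate bitmap can be built and tested with plain shifts. A small illustrative sketch, using hypothetical names rather than the driver's own helpers:

#include <stdbool.h>
#include <stdint.h>

/* Same shape as the IWL_RATE_xM_MASK defines: one bit per rate index. */
#define RATE_BIT(idx)	(1u << (idx))

/* Test whether a given rate index is present in a supported-rate bitmap. */
static inline bool rate_idx_supported(uint32_t supp_rates, unsigned int rate_idx)
{
	return (supp_rates & RATE_BIT(rate_idx)) != 0;
}

/* A CCK-only bitmap, for instance, sets the 1M, 2M, 5.5M and 11M index bits,
 * which mirrors how the CCK/OFDM group masks are composed from the per-rate
 * masks elsewhere in this header. */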
-/* MAC header values for bit rates */ -enum { - IWL_RATE_6M_IEEE = 12, - IWL_RATE_9M_IEEE = 18, - IWL_RATE_12M_IEEE = 24, - IWL_RATE_18M_IEEE = 36, - IWL_RATE_24M_IEEE = 48, - IWL_RATE_36M_IEEE = 72, - IWL_RATE_48M_IEEE = 96, - IWL_RATE_54M_IEEE = 108, - IWL_RATE_60M_IEEE = 120, - IWL_RATE_1M_IEEE = 2, - IWL_RATE_2M_IEEE = 4, - IWL_RATE_5M_IEEE = 11, - IWL_RATE_11M_IEEE = 22, -}; - -#define IWL_CCK_BASIC_RATES_MASK \ - (IWL_RATE_1M_MASK | \ - IWL_RATE_2M_MASK) - -#define IWL_CCK_RATES_MASK \ - (IWL_CCK_BASIC_RATES_MASK | \ - IWL_RATE_5M_MASK | \ - IWL_RATE_11M_MASK) - -#define IWL_OFDM_BASIC_RATES_MASK \ - (IWL_RATE_6M_MASK | \ - IWL_RATE_12M_MASK | \ - IWL_RATE_24M_MASK) - -#define IWL_OFDM_RATES_MASK \ - (IWL_OFDM_BASIC_RATES_MASK | \ - IWL_RATE_9M_MASK | \ - IWL_RATE_18M_MASK | \ - IWL_RATE_36M_MASK | \ - IWL_RATE_48M_MASK | \ - IWL_RATE_54M_MASK) - -#define IWL_BASIC_RATES_MASK \ - (IWL_OFDM_BASIC_RATES_MASK | \ - IWL_CCK_BASIC_RATES_MASK) - -#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) -#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1) - -#define IWL_INVALID_VALUE -1 - -#define IWL_MIN_RSSI_VAL -100 -#define IWL_MAX_RSSI_VAL 0 - -/* These values specify how many Tx frame attempts before - * searching for a new modulation mode */ -#define IWL_LEGACY_FAILURE_LIMIT 160 -#define IWL_LEGACY_SUCCESS_LIMIT 480 -#define IWL_LEGACY_TABLE_COUNT 160 - -#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 -#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 -#define IWL_NONE_LEGACY_TABLE_COUNT 1500 - -/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ -#define IWL_RS_GOOD_RATIO 12800 /* 100% */ -#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */ -#define IWL_RATE_HIGH_TH 10880 /* 85% */ -#define IWL_RATE_INCREASE_TH 6400 /* 50% */ -#define IWL_RATE_DECREASE_TH 1920 /* 15% */ - -/* possible actions when in legacy mode */ -#define IWL_LEGACY_SWITCH_ANTENNA1 0 -#define IWL_LEGACY_SWITCH_ANTENNA2 1 -#define IWL_LEGACY_SWITCH_SISO 2 -#define IWL_LEGACY_SWITCH_MIMO2_AB 3 -#define IWL_LEGACY_SWITCH_MIMO2_AC 4 -#define IWL_LEGACY_SWITCH_MIMO2_BC 5 - -/* possible actions when in siso mode */ -#define IWL_SISO_SWITCH_ANTENNA1 0 -#define IWL_SISO_SWITCH_ANTENNA2 1 -#define IWL_SISO_SWITCH_MIMO2_AB 2 -#define IWL_SISO_SWITCH_MIMO2_AC 3 -#define IWL_SISO_SWITCH_MIMO2_BC 4 -#define IWL_SISO_SWITCH_GI 5 - -/* possible actions when in mimo mode */ -#define IWL_MIMO2_SWITCH_ANTENNA1 0 -#define IWL_MIMO2_SWITCH_ANTENNA2 1 -#define IWL_MIMO2_SWITCH_SISO_A 2 -#define IWL_MIMO2_SWITCH_SISO_B 3 -#define IWL_MIMO2_SWITCH_SISO_C 4 -#define IWL_MIMO2_SWITCH_GI 5 - -#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI - -#define IWL_ACTION_LIMIT 3 /* # possible actions */ - -#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ - -/* load per tid defines for A-MPDU activation */ -#define IWL_AGG_TPT_THREHOLD 0 -#define IWL_AGG_LOAD_THRESHOLD 10 -#define IWL_AGG_ALL_TID 0xff -#define TID_QUEUE_CELL_SPACING 50 /*mS */ -#define TID_QUEUE_MAX_SIZE 20 -#define TID_ROUND_VALUE 5 /* mS */ -#define TID_MAX_LOAD_COUNT 8 - -#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) -#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) - -extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT]; - -enum iwl_table_type { - LQ_NONE, - LQ_G, /* legacy types */ - LQ_A, - LQ_SISO, /* high-throughput types */ - LQ_MIMO2, - LQ_MAX, -}; - -#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) -#define is_siso(tbl) ((tbl) == LQ_SISO) -#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) -#define is_mimo(tbl) (is_mimo2(tbl)) -#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) -#define is_a_band(tbl) ((tbl) == LQ_A) -#define is_g_and(tbl) ((tbl) == LQ_G) - -#define ANT_NONE 0x0 -#define ANT_A BIT(0) -#define ANT_B BIT(1) -#define ANT_AB (ANT_A | ANT_B) -#define ANT_C BIT(2) -#define ANT_AC (ANT_A | ANT_C) -#define ANT_BC (ANT_B | ANT_C) -#define ANT_ABC (ANT_AB | ANT_C) - -#define IWL_MAX_MCS_DISPLAY_SIZE 12 - -struct iwl_rate_mcs_info { - char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; - char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; -}; - -/** - * struct iwl_rate_scale_data -- tx success history for one rate - */ -struct iwl_rate_scale_data { - u64 data; /* bitmap of successful frames */ - s32 success_counter; /* number of frames successful */ - s32 success_ratio; /* per-cent * 128 */ - s32 counter; /* number of frames attempted */ - s32 average_tpt; /* success ratio * expected throughput */ - unsigned long stamp; -}; - -/** - * struct iwl_scale_tbl_info -- tx params and success history for all rates - * - * There are two of these in struct iwl_lq_sta, - * one for "active", and one for "search". - */ -struct iwl_scale_tbl_info { - enum iwl_table_type lq_type; - u8 ant_type; - u8 is_SGI; /* 1 = short guard interval */ - u8 is_ht40; /* 1 = 40 MHz channel width */ - u8 is_dup; /* 1 = duplicated data streams */ - u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ - u8 max_search; /* maximun number of tables we can search */ - s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ - u32 current_rate; /* rate_n_flags, uCode API format */ - struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ -}; - -struct iwl_traffic_load { - unsigned long time_stamp; /* age of the oldest statistics */ - u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time - * slice */ - u32 total; /* total num of packets during the - * last TID_MAX_TIME_DIFF */ - u8 queue_count; /* number of queues that has - * been used since the last cleanup */ - u8 head; /* start of the circular buffer */ -}; - -/** - * struct iwl_lq_sta -- driver's rate scaling private structure - * - * Pointer to this gets passed back and forth between driver and mac80211. - */ -struct iwl_lq_sta { - u8 active_tbl; /* index of active table, range 0-1 */ - u8 enable_counter; /* indicates HT mode */ - u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ - u8 search_better_tbl; /* 1: currently trying alternate mode */ - s32 last_tpt; - - /* The following determine when to search for a new mode */ - u32 table_count_limit; - u32 max_failure_limit; /* # failed frames before new search */ - u32 max_success_limit; /* # successful frames before new search */ - u32 table_count; - u32 total_failed; /* total failed frames, any/all rates */ - u32 total_success; /* total successful frames, any/all rates */ - u64 flush_timer; /* time staying in mode before new search */ - - u8 action_counter; /* # mode-switch actions tried */ - u8 is_green; - u8 is_dup; - enum ieee80211_band band; - - /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. 
*/ - u32 supp_rates; - u16 active_legacy_rate; - u16 active_siso_rate; - u16 active_mimo2_rate; - s8 max_rate_idx; /* Max rate set by user */ - u8 missed_rate_counter; - - struct iwl_link_quality_cmd lq; - struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; - u8 tx_agg_tid_en; -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_scale_table_file; - struct dentry *rs_sta_dbgfs_stats_table_file; - struct dentry *rs_sta_dbgfs_rate_scale_data_file; - struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; - u32 dbg_fixed_rate; -#endif - struct iwl_priv *drv; - - /* used to be in sta_info */ - int last_txrate_idx; - /* last tx rate_n_flags */ - u32 last_rate_n_flags; - /* packets destined for this STA are aggregated */ - u8 is_agg; -}; - -static inline u8 iwl4965_num_of_ant(u8 mask) -{ - return !!((mask) & ANT_A) + - !!((mask) & ANT_B) + - !!((mask) & ANT_C); -} - -static inline u8 iwl4965_first_antenna(u8 mask) -{ - if (mask & ANT_A) - return ANT_A; - if (mask & ANT_B) - return ANT_B; - return ANT_C; -} - - -/** - * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info - * - * The specific throughput table used is based on the type of network - * the associated with, including A, B, G, and G w/ TGG protection - */ -extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); - -/* Initialize station's rate scaling information after adding station */ -extern void iwl4965_rs_rate_init(struct iwl_priv *priv, - struct ieee80211_sta *sta, u8 sta_id); -extern void iwl3945_rs_rate_init(struct iwl_priv *priv, - struct ieee80211_sta *sta, u8 sta_id); - -/** - * iwl_rate_control_register - Register the rate control algorithm callbacks - * - * Since the rate control algorithm is hardware specific, there is no need - * or reason to place it as a stand alone module. The driver can call - * iwl_rate_control_register in order to register the rate control callbacks - * with the mac80211 subsystem. This should be performed prior to calling - * ieee80211_register_hw - * - */ -extern int iwl4965_rate_control_register(void); -extern int iwl3945_rate_control_register(void); - -/** - * iwl_rate_control_unregister - Unregister the rate control callbacks - * - * This should be called after calling ieee80211_unregister_hw, but before - * the driver is unloaded. - */ -extern void iwl4965_rate_control_unregister(void); -extern void iwl3945_rate_control_unregister(void); - -#endif /* __iwl_legacy_rs__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c deleted file mode 100644 index 903ef0d6d6c..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-power.c +++ /dev/null @@ -1,165 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/init.h> - -#include <net/mac80211.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-commands.h" -#include "iwl-debug.h" -#include "iwl-power.h" - -/* - * Setting power level allows the card to go to sleep when not busy. - * - * We calculate a sleep command based on the required latency, which - * we get from mac80211. In order to handle thermal throttling, we can - * also use pre-defined power levels. - */ - -/* - * This defines the old power levels. They are still used by default - * (level 1) and for thermal throttle (levels 3 through 5) - */ - -struct iwl_power_vec_entry { - struct iwl_powertable_cmd cmd; - u8 no_dtim; /* number of skip dtim */ -}; - -static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv, - struct iwl_powertable_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - - if (priv->power_data.pci_pm) - cmd->flags |= IWL_POWER_PCI_PM_MSK; - - IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); -} - -static int -iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) -{ - IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); - IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); - IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", - le32_to_cpu(cmd->tx_data_timeout)); - IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", - le32_to_cpu(cmd->rx_data_timeout)); - IWL_DEBUG_POWER(priv, - "Sleep interval vector = { %d , %d , %d , %d , %d }\n", - le32_to_cpu(cmd->sleep_interval[0]), - le32_to_cpu(cmd->sleep_interval[1]), - le32_to_cpu(cmd->sleep_interval[2]), - le32_to_cpu(cmd->sleep_interval[3]), - le32_to_cpu(cmd->sleep_interval[4])); - - return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD, - sizeof(struct iwl_powertable_cmd), cmd); -} - -int -iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, - bool force) -{ - int ret; - bool update_chains; - - lockdep_assert_held(&priv->mutex); - - /* Don't update the RX chain when chain noise calibration is running */ - update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || - priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; - - if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) - return 0; - - if (!iwl_legacy_is_ready_rf(priv)) - return -EIO; - - /* scan complete use sleep_power_next, need to be updated */ - memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); - if (test_bit(STATUS_SCANNING, &priv->status) && !force) { - IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); - return 0; - } - - if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) - set_bit(STATUS_POWER_PMI, &priv->status); - - ret = iwl_legacy_set_power(priv, cmd); - if (!ret) { - if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) - clear_bit(STATUS_POWER_PMI, &priv->status); - - if (priv->cfg->ops->lib->update_chain_flags && update_chains) - 
priv->cfg->ops->lib->update_chain_flags(priv); - else if (priv->cfg->ops->lib->update_chain_flags) - IWL_DEBUG_POWER(priv, - "Cannot update the power, chain noise " - "calibration running: %d\n", - priv->chain_noise_data.state); - - memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); - } else - IWL_ERR(priv, "set power fail, ret = %d", ret); - - return ret; -} - -int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force) -{ - struct iwl_powertable_cmd cmd; - - iwl_legacy_power_sleep_cam_cmd(priv, &cmd); - return iwl_legacy_power_set_mode(priv, &cmd, force); -} -EXPORT_SYMBOL(iwl_legacy_power_update_mode); - -/* initialize to default */ -void iwl_legacy_power_initialize(struct iwl_priv *priv) -{ - u16 lctl = iwl_legacy_pcie_link_ctl(priv); - - priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); - - priv->power_data.debug_sleep_level_override = -1; - - memset(&priv->power_data.sleep_cmd, 0, - sizeof(priv->power_data.sleep_cmd)); -} -EXPORT_SYMBOL(iwl_legacy_power_initialize); diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h deleted file mode 100644 index d30b36acdc4..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-power.h +++ /dev/null @@ -1,55 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#ifndef __iwl_legacy_power_setting_h__ -#define __iwl_legacy_power_setting_h__ - -#include "iwl-commands.h" - -enum iwl_power_level { - IWL_POWER_INDEX_1, - IWL_POWER_INDEX_2, - IWL_POWER_INDEX_3, - IWL_POWER_INDEX_4, - IWL_POWER_INDEX_5, - IWL_POWER_NUM -}; - -struct iwl_power_mgr { - struct iwl_powertable_cmd sleep_cmd; - struct iwl_powertable_cmd sleep_cmd_next; - int debug_sleep_level_override; - bool pci_pm; -}; - -int -iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, - bool force); -int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force); -void iwl_legacy_power_initialize(struct iwl_priv *priv); - -#endif /* __iwl_legacy_power_setting_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c deleted file mode 100644 index f4d21ec2249..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-rx.c +++ /dev/null @@ -1,282 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/etherdevice.h> -#include <linux/slab.h> -#include <linux/export.h> -#include <net/mac80211.h> -#include <asm/unaligned.h> -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-sta.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -/************************** RX-FUNCTIONS ****************************/ -/* - * Rx theory of operation - * - * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), - * each of which point to Receive Buffers to be filled by the NIC. These get - * used not only for Rx frames, but for any command response or notification - * from the NIC. The driver and NIC manage the Rx buffers by means - * of indexes into the circular buffer. - * - * Rx Queue Indexes - * The host/firmware share two index registers for managing the Rx buffers. - * - * The READ index maps to the first position that the firmware may be writing - * to -- the driver can read up to (but not including) this position and get - * good data. - * The READ index is managed by the firmware once the card is enabled. 
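/*
 * Illustrative sketch, not driver code: the power-management code removed
 * above (iwl-power.c / iwl-power.h) caches the last sleep command it sent
 * and only talks to the firmware again when the command actually changed or
 * a resend is forced.  The standalone C below models that "compare against
 * the cached command before sending" pattern; the struct layout and the send
 * helper are assumptions made up for this example.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct power_cmd {
	unsigned flags;
	unsigned rx_timeout;
	unsigned tx_timeout;
};

static struct power_cmd cached;		/* last command accepted by the "firmware" */

static int send_power_cmd(const struct power_cmd *cmd)
{
	printf("sending power cmd: flags=0x%x\n", cmd->flags);
	return 0;			/* pretend the firmware accepted it */
}

static int set_power_mode(const struct power_cmd *cmd, bool force)
{
	/* identical to what was already sent and not forced: nothing to do */
	if (!force && !memcmp(&cached, cmd, sizeof(*cmd)))
		return 0;

	if (send_power_cmd(cmd))
		return -1;

	cached = *cmd;			/* remember what the firmware now has */
	return 0;
}

int main(void)
{
	struct power_cmd cam = { .flags = 0 };	/* "continuously awake" mode */

	set_power_mode(&cam, false);	/* first call: sent */
	set_power_mode(&cam, false);	/* unchanged: skipped */
	set_power_mode(&cam, true);	/* forced: sent again */
	return 0;
}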
- * - * The WRITE index maps to the last position the driver has read from -- the - * position preceding WRITE is the last slot the firmware can place a packet. - * - * The queue is empty (no good data) if WRITE = READ - 1, and is full if - * WRITE = READ. - * - * During initialization, the host sets up the READ queue position to the first - * INDEX position, and WRITE to the last (READ - 1 wrapped) - * - * When the firmware places a packet in a buffer, it will advance the READ index - * and fire the RX interrupt. The driver can then query the READ index and - * process as many packets as possible, moving the WRITE index forward as it - * resets the Rx queue buffers with new memory. - * - * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) - * + A received packet is processed and handed to the kernel network stack, - * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free - * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ - * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there - * were enough free buffers and RX_STALLED is set it is cleared. - * - * - * Driver sequence: - * - * iwl_legacy_rx_queue_alloc() Allocates rx_free - * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_rx_queue_restock - * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx - * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_rx_replenish - * - * -- enable interrupts -- - * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the - * READ INDEX, detaching the SKB from the pool. - * Moves the packet buffer from queue to rx_used. - * Calls iwl_rx_queue_restock to refill any empty - * slots. - * ... - * - */ - -/** - * iwl_legacy_rx_queue_space - Return number of free slots available in queue. 
- */ -int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q) -{ - int s = q->read - q->write; - if (s <= 0) - s += RX_QUEUE_SIZE; - /* keep some buffer to not confuse full and empty queue */ - s -= 2; - if (s < 0) - s = 0; - return s; -} -EXPORT_SYMBOL(iwl_legacy_rx_queue_space); - -/** - * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue - */ -void -iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, - struct iwl_rx_queue *q) -{ - unsigned long flags; - u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; - u32 reg; - - spin_lock_irqsave(&q->lock, flags); - - if (q->need_update == 0) - goto exit_unlock; - - /* If power-saving is in use, make sure device is awake */ - if (test_bit(STATUS_POWER_PMI, &priv->status)) { - reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - IWL_DEBUG_INFO(priv, - "Rx queue requesting wakeup," - " GP1 = 0x%x\n", reg); - iwl_legacy_set_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - goto exit_unlock; - } - - q->write_actual = (q->write & ~0x7); - iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, - q->write_actual); - - /* Else device is assumed to be awake */ - } else { - /* Device expects a multiple of 8 */ - q->write_actual = (q->write & ~0x7); - iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, - q->write_actual); - } - - q->need_update = 0; - - exit_unlock: - spin_unlock_irqrestore(&q->lock, flags); -} -EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr); - -int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct device *dev = &priv->pci_dev->dev; - int i; - - spin_lock_init(&rxq->lock); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - - /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, - GFP_KERNEL); - if (!rxq->bd) - goto err_bd; - - rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err_rb; - - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - rxq->need_update = 0; - return 0; - -err_rb: - dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); -err_bd: - return -ENOMEM; -} -EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc); - - -void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); - - if (!report->state) { - IWL_DEBUG_11H(priv, - "Spectrum Measure Notification: Start\n"); - return; - } - - memcpy(&priv->measure_report, report, sizeof(*report)); - priv->measurement_status |= MEASUREMENT_READY; -} -EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif); - -/* - * returns non-zero if packet should be dropped - */ -int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u32 decrypt_res, - struct ieee80211_rx_status *stats) -{ - u16 fc = le16_to_cpu(hdr->frame_control); - - /* - * All contexts have the same setting here due to it being - * a module parameter, so OK to check any context. 
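/*
 * Illustrative sketch, not driver code: a standalone model of the Rx ring
 * accounting described in the "Rx theory of operation" comment and
 * implemented by iwl_legacy_rx_queue_space() above.  The ring is empty when
 * WRITE sits one behind READ, full when WRITE == READ, and two slots are
 * held back so those two states stay distinguishable.  QUEUE_SIZE is an
 * assumed value for the example; the driver defines its own.
 */
#include <stdio.h>

#define QUEUE_SIZE 256

static int rx_queue_space(unsigned read, unsigned write)
{
	int s = (int)read - (int)write;

	if (s <= 0)
		s += QUEUE_SIZE;	/* wrap around the circular buffer */
	s -= 2;				/* slack so "full" never looks "empty" */
	return s < 0 ? 0 : s;
}

int main(void)
{
	printf("read=0  write=0 -> %d free\n", rx_queue_space(0, 0));
	printf("read=10 write=0 -> %d free\n", rx_queue_space(10, 0));
	printf("read=10 write=9 -> %d free\n", rx_queue_space(10, 9));
	return 0;
}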
- */ - if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & - RXON_FILTER_DIS_DECRYPT_MSK) - return 0; - - if (!(fc & IEEE80211_FCTL_PROTECTED)) - return 0; - - IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); - switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { - case RX_RES_STATUS_SEC_TYPE_TKIP: - /* The uCode has got a bad phase 1 Key, pushes the packet. - * Decryption will be done in SW. */ - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_BAD_KEY_TTAK) - break; - - case RX_RES_STATUS_SEC_TYPE_WEP: - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_BAD_ICV_MIC) { - /* bad ICV, the packet is destroyed since the - * decryption is inplace, drop it */ - IWL_DEBUG_RX(priv, "Packet destroyed\n"); - return -1; - } - case RX_RES_STATUS_SEC_TYPE_CCMP: - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_DECRYPT_OK) { - IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); - stats->flag |= RX_FLAG_DECRYPTED; - } - break; - - default: - break; - } - return 0; -} -EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag); diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c deleted file mode 100644 index 521b73b527d..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-scan.c +++ /dev/null @@ -1,550 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/etherdevice.h> -#include <linux/export.h> -#include <net/mac80211.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-sta.h" -#include "iwl-io.h" -#include "iwl-helpers.h" - -/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after - * sending probe req. This should be set long enough to hear probe responses - * from more than one AP. */ -#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ -#define IWL_ACTIVE_DWELL_TIME_52 (20) - -#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) -#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) - -/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. - * Must be set longer than active dwell time. - * For the most reliable scan, set > AP beacon interval (typically 100msec). 
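/*
 * Illustrative sketch, not driver code: iwl_legacy_set_decrypted_flag()
 * above maps the hardware decryption status onto one of three outcomes:
 * drop the frame (bad WEP ICV, the in-place decryption destroyed it), hand
 * it to mac80211 for software decryption (bad TKIP phase-1 key), or mark it
 * as already decrypted.  The enum values and helper below are assumptions
 * invented for this standalone model of that decision.
 */
#include <stdio.h>

enum hw_decrypt_status {
	DECRYPT_OK,		/* hardware decrypted the frame */
	DECRYPT_BAD_TTAK,	/* bad TKIP phase-1 key: retry in software */
	DECRYPT_BAD_ICV,	/* bad WEP ICV: frame is ruined, drop it */
};

enum rx_action {
	RX_ACCEPT_DECRYPTED,	/* pass up, flagged as decrypted */
	RX_ACCEPT_SW_DECRYPT,	/* pass up, let software decrypt */
	RX_DROP,
};

static enum rx_action classify_rx(enum hw_decrypt_status st)
{
	switch (st) {
	case DECRYPT_BAD_TTAK:
		return RX_ACCEPT_SW_DECRYPT;
	case DECRYPT_BAD_ICV:
		return RX_DROP;
	case DECRYPT_OK:
	default:
		return RX_ACCEPT_DECRYPTED;
	}
}

int main(void)
{
	printf("bad ICV  -> %d (drop)\n", classify_rx(DECRYPT_BAD_ICV));
	printf("bad TTAK -> %d (sw decrypt)\n", classify_rx(DECRYPT_BAD_TTAK));
	printf("ok       -> %d (decrypted)\n", classify_rx(DECRYPT_OK));
	return 0;
}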
*/ -#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ -#define IWL_PASSIVE_DWELL_TIME_52 (10) -#define IWL_PASSIVE_DWELL_BASE (100) -#define IWL_CHANNEL_TUNE_TIME 5 - -static int iwl_legacy_send_scan_abort(struct iwl_priv *priv) -{ - int ret; - struct iwl_rx_packet *pkt; - struct iwl_host_cmd cmd = { - .id = REPLY_SCAN_ABORT_CMD, - .flags = CMD_WANT_SKB, - }; - - /* Exit instantly with error when device is not ready - * to receive scan abort command or it does not perform - * hardware scan currently */ - if (!test_bit(STATUS_READY, &priv->status) || - !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || - !test_bit(STATUS_SCAN_HW, &priv->status) || - test_bit(STATUS_FW_ERROR, &priv->status) || - test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EIO; - - ret = iwl_legacy_send_cmd_sync(priv, &cmd); - if (ret) - return ret; - - pkt = (struct iwl_rx_packet *)cmd.reply_page; - if (pkt->u.status != CAN_ABORT_STATUS) { - /* The scan abort will return 1 for success or - * 2 for "failure". A failure condition can be - * due to simply not being in an active scan which - * can occur if we send the scan abort before we - * the microcode has notified us that a scan is - * completed. */ - IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status); - ret = -EIO; - } - - iwl_legacy_free_pages(priv, cmd.reply_page); - return ret; -} - -static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted) -{ - /* check if scan was requested from mac80211 */ - if (priv->scan_request) { - IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); - ieee80211_scan_completed(priv->hw, aborted); - } - - priv->scan_vif = NULL; - priv->scan_request = NULL; -} - -void iwl_legacy_force_scan_end(struct iwl_priv *priv) -{ - lockdep_assert_held(&priv->mutex); - - if (!test_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); - return; - } - - IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); - clear_bit(STATUS_SCANNING, &priv->status); - clear_bit(STATUS_SCAN_HW, &priv->status); - clear_bit(STATUS_SCAN_ABORTING, &priv->status); - iwl_legacy_complete_scan(priv, true); -} - -static void iwl_legacy_do_scan_abort(struct iwl_priv *priv) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - if (!test_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); - return; - } - - if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); - return; - } - - ret = iwl_legacy_send_scan_abort(priv); - if (ret) { - IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret); - iwl_legacy_force_scan_end(priv); - } else - IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n"); -} - -/** - * iwl_scan_cancel - Cancel any currently executing HW scan - */ -int iwl_legacy_scan_cancel(struct iwl_priv *priv) -{ - IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); - queue_work(priv->workqueue, &priv->abort_scan); - return 0; -} -EXPORT_SYMBOL(iwl_legacy_scan_cancel); - -/** - * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan - * @ms: amount of time to wait (in milliseconds) for scan to abort - * - */ -int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(ms); - - lockdep_assert_held(&priv->mutex); - - IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); - - iwl_legacy_do_scan_abort(priv); - - while (time_before_eq(jiffies, timeout)) { - if (!test_bit(STATUS_SCAN_HW, &priv->status)) - break; - msleep(20); 
- } - - return test_bit(STATUS_SCAN_HW, &priv->status); -} -EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout); - -/* Service response to REPLY_SCAN_CMD (0x80) */ -static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scanreq_notification *notif = - (struct iwl_scanreq_notification *)pkt->u.raw; - - IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); -#endif -} - -/* Service SCAN_START_NOTIFICATION (0x82) */ -static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scanstart_notification *notif = - (struct iwl_scanstart_notification *)pkt->u.raw; - priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); - IWL_DEBUG_SCAN(priv, "Scan start: " - "%d [802.11%s] " - "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", - notif->channel, - notif->band ? "bg" : "a", - le32_to_cpu(notif->tsf_high), - le32_to_cpu(notif->tsf_low), - notif->status, notif->beacon_timer); -} - -/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ -static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scanresults_notification *notif = - (struct iwl_scanresults_notification *)pkt->u.raw; - - IWL_DEBUG_SCAN(priv, "Scan ch.res: " - "%d [802.11%s] " - "(TSF: 0x%08X:%08X) - %d " - "elapsed=%lu usec\n", - notif->channel, - notif->band ? "bg" : "a", - le32_to_cpu(notif->tsf_high), - le32_to_cpu(notif->tsf_low), - le32_to_cpu(notif->statistics[0]), - le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); -#endif -} - -/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ -static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; -#endif - - IWL_DEBUG_SCAN(priv, - "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", - scan_notif->scanned_channels, - scan_notif->tsf_low, - scan_notif->tsf_high, scan_notif->status); - - /* The HW is no longer scanning */ - clear_bit(STATUS_SCAN_HW, &priv->status); - - IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", - (priv->scan_band == IEEE80211_BAND_2GHZ) ? 
"2.4" : "5.2", - jiffies_to_msecs(jiffies - priv->scan_start)); - - queue_work(priv->workqueue, &priv->scan_completed); -} - -void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv) -{ - /* scan handlers */ - priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan; - priv->rx_handlers[SCAN_START_NOTIFICATION] = - iwl_legacy_rx_scan_start_notif; - priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = - iwl_legacy_rx_scan_results_notif; - priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = - iwl_legacy_rx_scan_complete_notif; -} -EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers); - -inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, - u8 n_probes) -{ - if (band == IEEE80211_BAND_5GHZ) - return IWL_ACTIVE_DWELL_TIME_52 + - IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); - else - return IWL_ACTIVE_DWELL_TIME_24 + - IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); -} -EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time); - -u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, - struct ieee80211_vif *vif) -{ - struct iwl_rxon_context *ctx; - u16 passive = (band == IEEE80211_BAND_2GHZ) ? - IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : - IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; - - if (iwl_legacy_is_any_associated(priv)) { - /* - * If we're associated, we clamp the maximum passive - * dwell time to be 98% of the smallest beacon interval - * (minus 2 * channel tune time) - */ - for_each_context(priv, ctx) { - u16 value; - - if (!iwl_legacy_is_associated_ctx(ctx)) - continue; - value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0; - if ((value > IWL_PASSIVE_DWELL_BASE) || !value) - value = IWL_PASSIVE_DWELL_BASE; - value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; - passive = min(value, passive); - } - } - - return passive; -} -EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time); - -void iwl_legacy_init_scan_params(struct iwl_priv *priv) -{ - u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; - if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; - if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; -} -EXPORT_SYMBOL(iwl_legacy_init_scan_params); - -static int iwl_legacy_scan_initiate(struct iwl_priv *priv, - struct ieee80211_vif *vif) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - if (WARN_ON(!priv->cfg->ops->utils->request_scan)) - return -EOPNOTSUPP; - - cancel_delayed_work(&priv->scan_check); - - if (!iwl_legacy_is_ready_rf(priv)) { - IWL_WARN(priv, "Request scan called when driver not ready.\n"); - return -EIO; - } - - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - IWL_DEBUG_SCAN(priv, - "Multiple concurrent scan requests in parallel.\n"); - return -EBUSY; - } - - if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); - return -EBUSY; - } - - IWL_DEBUG_SCAN(priv, "Starting scan...\n"); - - set_bit(STATUS_SCANNING, &priv->status); - priv->scan_start = jiffies; - - ret = priv->cfg->ops->utils->request_scan(priv, vif); - if (ret) { - clear_bit(STATUS_SCANNING, &priv->status); - return ret; - } - - queue_delayed_work(priv->workqueue, &priv->scan_check, - IWL_SCAN_CHECK_WATCHDOG); - - return 0; -} - -int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (req->n_channels == 0) - return 
-EINVAL; - - mutex_lock(&priv->mutex); - - if (test_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); - ret = -EAGAIN; - goto out_unlock; - } - - /* mac80211 will only ask for one band at a time */ - priv->scan_request = req; - priv->scan_vif = vif; - priv->scan_band = req->channels[0]->band; - - ret = iwl_legacy_scan_initiate(priv, vif); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - -out_unlock: - mutex_unlock(&priv->mutex); - - return ret; -} -EXPORT_SYMBOL(iwl_legacy_mac_hw_scan); - -static void iwl_legacy_bg_scan_check(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, scan_check.work); - - IWL_DEBUG_SCAN(priv, "Scan check work\n"); - - /* Since we are here firmware does not finish scan and - * most likely is in bad shape, so we don't bother to - * send abort command, just force scan complete to mac80211 */ - mutex_lock(&priv->mutex); - iwl_legacy_force_scan_end(priv); - mutex_unlock(&priv->mutex); -} - -/** - * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request - */ - -u16 -iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, - const u8 *ta, const u8 *ies, int ie_len, int left) -{ - int len = 0; - u8 *pos = NULL; - - /* Make sure there is enough space for the probe request, - * two mandatory IEs and the data */ - left -= 24; - if (left < 0) - return 0; - - frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); - memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN); - memcpy(frame->sa, ta, ETH_ALEN); - memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN); - frame->seq_ctrl = 0; - - len += 24; - - /* ...next IE... */ - pos = &frame->u.probe_req.variable[0]; - - /* fill in our indirect SSID IE */ - left -= 2; - if (left < 0) - return 0; - *pos++ = WLAN_EID_SSID; - *pos++ = 0; - - len += 2; - - if (WARN_ON(left < ie_len)) - return len; - - if (ies && ie_len) { - memcpy(pos, ies, ie_len); - len += ie_len; - } - - return (u16)len; -} -EXPORT_SYMBOL(iwl_legacy_fill_probe_req); - -static void iwl_legacy_bg_abort_scan(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); - - IWL_DEBUG_SCAN(priv, "Abort scan work\n"); - - /* We keep scan_check work queued in case when firmware will not - * report back scan completed notification */ - mutex_lock(&priv->mutex); - iwl_legacy_scan_cancel_timeout(priv, 200); - mutex_unlock(&priv->mutex); -} - -static void iwl_legacy_bg_scan_completed(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, scan_completed); - bool aborted; - - IWL_DEBUG_SCAN(priv, "Completed scan.\n"); - - cancel_delayed_work(&priv->scan_check); - - mutex_lock(&priv->mutex); - - aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); - if (aborted) - IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); - - if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); - goto out_settings; - } - - iwl_legacy_complete_scan(priv, aborted); - -out_settings: - /* Can we still talk to firmware ? */ - if (!iwl_legacy_is_ready_rf(priv)) - goto out; - - /* - * We do not commit power settings while scan is pending, - * do it now if the settings changed. 
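/*
 * Illustrative sketch, not driver code: the scan helpers removed above pick
 * a passive dwell time per band and, when already associated, clamp it to
 * 98% of the smallest beacon interval minus twice the channel tune time, so
 * a passive scan never sits on a foreign channel long enough to miss its own
 * AP's beacons.  The constants mirror the #defines in the deleted iwl-scan.c;
 * the function itself is a simplified standalone stand-in.
 */
#include <stdio.h>

#define PASSIVE_DWELL_BASE	100	/* msec */
#define PASSIVE_DWELL_TIME_24	20	/* msec, 2.4 GHz band */
#define CHANNEL_TUNE_TIME	5	/* msec */

static int passive_dwell_24ghz(int beacon_int, int associated)
{
	int passive = PASSIVE_DWELL_BASE + PASSIVE_DWELL_TIME_24;
	int limit;

	if (!associated)
		return passive;

	/* unknown or out-of-range beacon intervals fall back to the base */
	if (beacon_int <= 0 || beacon_int > PASSIVE_DWELL_BASE)
		beacon_int = PASSIVE_DWELL_BASE;

	limit = (beacon_int * 98) / 100 - 2 * CHANNEL_TUNE_TIME;
	if (limit < 0)
		limit = 0;
	return limit < passive ? limit : passive;
}

int main(void)
{
	printf("not associated:         %d ms\n", passive_dwell_24ghz(0, 0));
	printf("associated, beacon 100: %d ms\n", passive_dwell_24ghz(100, 1));
	return 0;
}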
- */ - iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); - iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); - - priv->cfg->ops->utils->post_scan(priv); - -out: - mutex_unlock(&priv->mutex); -} - -void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv) -{ - INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed); - INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan); - INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check); -} -EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work); - -void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv) -{ - cancel_work_sync(&priv->abort_scan); - cancel_work_sync(&priv->scan_completed); - - if (cancel_delayed_work_sync(&priv->scan_check)) { - mutex_lock(&priv->mutex); - iwl_legacy_force_scan_end(priv); - mutex_unlock(&priv->mutex); - } -} -EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work); diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h index 9f70a472310..85fe48e520f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h +++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h @@ -26,8 +26,8 @@ * *****************************************************************************/ -#ifndef __iwl_legacy_spectrum_h__ -#define __iwl_legacy_spectrum_h__ +#ifndef __il_spectrum_h__ +#define __il_spectrum_h__ enum { /* ieee80211_basic_report.map */ IEEE80211_BASIC_MAP_BSS = (1 << 0), IEEE80211_BASIC_MAP_OFDM = (1 << 1), diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c deleted file mode 100644 index f10df3e2813..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-sta.c +++ /dev/null @@ -1,817 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <linux/sched.h> -#include <linux/lockdep.h> -#include <linux/export.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-sta.h" - -/* priv->sta_lock must be held */ -static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) -{ - - if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) - IWL_ERR(priv, - "ACTIVATE a non DRIVER active station id %u addr %pM\n", - sta_id, priv->stations[sta_id].sta.sta.addr); - - if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { - IWL_DEBUG_ASSOC(priv, - "STA id %u addr %pM already present" - " in uCode (according to driver)\n", - sta_id, priv->stations[sta_id].sta.sta.addr); - } else { - priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; - IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", - sta_id, priv->stations[sta_id].sta.sta.addr); - } -} - -static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, - struct iwl_legacy_addsta_cmd *addsta, - struct iwl_rx_packet *pkt, - bool sync) -{ - u8 sta_id = addsta->sta.sta_id; - unsigned long flags; - int ret = -EIO; - - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", - pkt->hdr.flags); - return ret; - } - - IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", - sta_id); - - spin_lock_irqsave(&priv->sta_lock, flags); - - switch (pkt->u.add_sta.status) { - case ADD_STA_SUCCESS_MSK: - IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); - iwl_legacy_sta_ucode_activate(priv, sta_id); - ret = 0; - break; - case ADD_STA_NO_ROOM_IN_TABLE: - IWL_ERR(priv, "Adding station %d failed, no room in table.\n", - sta_id); - break; - case ADD_STA_NO_BLOCK_ACK_RESOURCE: - IWL_ERR(priv, - "Adding station %d failed, no block ack resource.\n", - sta_id); - break; - case ADD_STA_MODIFY_NON_EXIST_STA: - IWL_ERR(priv, "Attempting to modify non-existing station %d\n", - sta_id); - break; - default: - IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", - pkt->u.add_sta.status); - break; - } - - IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", - priv->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", - sta_id, priv->stations[sta_id].sta.sta.addr); - - /* - * XXX: The MAC address in the command buffer is often changed from - * the original sent to the device. That is, the MAC address - * written to the command buffer often is not the same MAC address - * read from the command buffer when the command returns. This - * issue has not yet been resolved and this debugging is left to - * observe the problem. - */ - IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", - priv->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", - addsta->sta.addr); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return ret; -} - -static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt) -{ - struct iwl_legacy_addsta_cmd *addsta = - (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; - - iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); - -} - -int iwl_legacy_send_add_sta(struct iwl_priv *priv, - struct iwl_legacy_addsta_cmd *sta, u8 flags) -{ - struct iwl_rx_packet *pkt = NULL; - int ret = 0; - u8 data[sizeof(*sta)]; - struct iwl_host_cmd cmd = { - .id = REPLY_ADD_STA, - .flags = flags, - .data = data, - }; - u8 sta_id __maybe_unused = sta->sta.sta_id; - - IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", - sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); - - if (flags & CMD_ASYNC) - cmd.callback = iwl_legacy_add_sta_callback; - else { - cmd.flags |= CMD_WANT_SKB; - might_sleep(); - } - - cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); - ret = iwl_legacy_send_cmd(priv, &cmd); - - if (ret || (flags & CMD_ASYNC)) - return ret; - - if (ret == 0) { - pkt = (struct iwl_rx_packet *)cmd.reply_page; - ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); - } - iwl_legacy_free_pages(priv, cmd.reply_page); - - return ret; -} -EXPORT_SYMBOL(iwl_legacy_send_add_sta); - -static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, - struct ieee80211_sta *sta, - struct iwl_rxon_context *ctx) -{ - struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; - __le32 sta_flags; - u8 mimo_ps_mode; - - if (!sta || !sta_ht_inf->ht_supported) - goto done; - - mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; - IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? - "static" : - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
- "dynamic" : "disabled"); - - sta_flags = priv->stations[index].sta.station_flags; - - sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); - - switch (mimo_ps_mode) { - case WLAN_HT_CAP_SM_PS_STATIC: - sta_flags |= STA_FLG_MIMO_DIS_MSK; - break; - case WLAN_HT_CAP_SM_PS_DYNAMIC: - sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; - break; - case WLAN_HT_CAP_SM_PS_DISABLED: - break; - default: - IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); - break; - } - - sta_flags |= cpu_to_le32( - (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); - - sta_flags |= cpu_to_le32( - (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); - - if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) - sta_flags |= STA_FLG_HT40_EN_MSK; - else - sta_flags &= ~STA_FLG_HT40_EN_MSK; - - priv->stations[index].sta.station_flags = sta_flags; - done: - return; -} - -/** - * iwl_legacy_prep_station - Prepare station information for addition - * - * should be called with sta_lock held - */ -u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, struct ieee80211_sta *sta) -{ - struct iwl_station_entry *station; - int i; - u8 sta_id = IWL_INVALID_STATION; - u16 rate; - - if (is_ap) - sta_id = ctx->ap_sta_id; - else if (is_broadcast_ether_addr(addr)) - sta_id = ctx->bcast_sta_id; - else - for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { - if (!compare_ether_addr(priv->stations[i].sta.sta.addr, - addr)) { - sta_id = i; - break; - } - - if (!priv->stations[i].used && - sta_id == IWL_INVALID_STATION) - sta_id = i; - } - - /* - * These two conditions have the same outcome, but keep them - * separate - */ - if (unlikely(sta_id == IWL_INVALID_STATION)) - return sta_id; - - /* - * uCode is not able to deal with multiple requests to add a - * station. Keep track if one is in progress so that we do not send - * another. - */ - if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { - IWL_DEBUG_INFO(priv, - "STA %d already in process of being added.\n", - sta_id); - return sta_id; - } - - if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && - (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && - !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { - IWL_DEBUG_ASSOC(priv, - "STA %d (%pM) already added, not adding again.\n", - sta_id, addr); - return sta_id; - } - - station = &priv->stations[sta_id]; - station->used = IWL_STA_DRIVER_ACTIVE; - IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", - sta_id, addr); - priv->num_stations++; - - /* Set up the REPLY_ADD_STA command to send to device */ - memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); - memcpy(station->sta.sta.addr, addr, ETH_ALEN); - station->sta.mode = 0; - station->sta.sta.sta_id = sta_id; - station->sta.station_flags = ctx->station_flags; - station->ctxid = ctx->ctxid; - - if (sta) { - struct iwl_station_priv_common *sta_priv; - - sta_priv = (void *)sta->drv_priv; - sta_priv->ctx = ctx; - } - - /* - * OK to call unconditionally, since local stations (IBSS BSSID - * STA and broadcast STA) pass in a NULL sta, and mac80211 - * doesn't allow HT IBSS. - */ - iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); - - /* 3945 only */ - rate = (priv->band == IEEE80211_BAND_5GHZ) ? - IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; - /* Turn on both antennas for the station... 
*/ - station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); - - return sta_id; - -} -EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); - -#define STA_WAIT_TIMEOUT (HZ/2) - -/** - * iwl_legacy_add_station_common - - */ -int -iwl_legacy_add_station_common(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, - struct ieee80211_sta *sta, u8 *sta_id_r) -{ - unsigned long flags_spin; - int ret = 0; - u8 sta_id; - struct iwl_legacy_addsta_cmd sta_cmd; - - *sta_id_r = 0; - spin_lock_irqsave(&priv->sta_lock, flags_spin); - sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Unable to prepare station %pM for addition\n", - addr); - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - return -EINVAL; - } - - /* - * uCode is not able to deal with multiple requests to add a - * station. Keep track if one is in progress so that we do not send - * another. - */ - if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { - IWL_DEBUG_INFO(priv, - "STA %d already in process of being added.\n", - sta_id); - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - return -EEXIST; - } - - if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && - (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { - IWL_DEBUG_ASSOC(priv, - "STA %d (%pM) already added, not adding again.\n", - sta_id, addr); - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - return -EEXIST; - } - - priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - - /* Add station to device's station table */ - ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); - if (ret) { - spin_lock_irqsave(&priv->sta_lock, flags_spin); - IWL_ERR(priv, "Adding station %pM failed.\n", - priv->stations[sta_id].sta.sta.addr); - priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; - priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - } - *sta_id_r = sta_id; - return ret; -} -EXPORT_SYMBOL(iwl_legacy_add_station_common); - -/** - * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station - * - * priv->sta_lock must be held - */ -static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) -{ - /* Ucode must be active and driver must be non active */ - if ((priv->stations[sta_id].used & - (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != - IWL_STA_UCODE_ACTIVE) - IWL_ERR(priv, "removed non active STA %u\n", sta_id); - - priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; - - memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); - IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); -} - -static int iwl_legacy_send_remove_station(struct iwl_priv *priv, - const u8 *addr, int sta_id, - bool temporary) -{ - struct iwl_rx_packet *pkt; - int ret; - - unsigned long flags_spin; - struct iwl_rem_sta_cmd rm_sta_cmd; - - struct iwl_host_cmd cmd = { - .id = REPLY_REMOVE_STA, - .len = sizeof(struct iwl_rem_sta_cmd), - .flags = CMD_SYNC, - .data = &rm_sta_cmd, - }; - - memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); - rm_sta_cmd.num_sta = 1; - memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); - - cmd.flags |= CMD_WANT_SKB; - - ret = iwl_legacy_send_cmd(priv, &cmd); - - if (ret) - return ret; - - pkt = (struct iwl_rx_packet *)cmd.reply_page; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from 
REPLY_REMOVE_STA (0x%08X)\n", - pkt->hdr.flags); - ret = -EIO; - } - - if (!ret) { - switch (pkt->u.rem_sta.status) { - case REM_STA_SUCCESS_MSK: - if (!temporary) { - spin_lock_irqsave(&priv->sta_lock, flags_spin); - iwl_legacy_sta_ucode_deactivate(priv, sta_id); - spin_unlock_irqrestore(&priv->sta_lock, - flags_spin); - } - IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); - break; - default: - ret = -EIO; - IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); - break; - } - } - iwl_legacy_free_pages(priv, cmd.reply_page); - - return ret; -} - -/** - * iwl_legacy_remove_station - Remove driver's knowledge of station. - */ -int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, - const u8 *addr) -{ - unsigned long flags; - - if (!iwl_legacy_is_ready(priv)) { - IWL_DEBUG_INFO(priv, - "Unable to remove station %pM, device not ready.\n", - addr); - /* - * It is typical for stations to be removed when we are - * going down. Return success since device will be down - * soon anyway - */ - return 0; - } - - IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", - sta_id, addr); - - if (WARN_ON(sta_id == IWL_INVALID_STATION)) - return -EINVAL; - - spin_lock_irqsave(&priv->sta_lock, flags); - - if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { - IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", - addr); - goto out_err; - } - - if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { - IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", - addr); - goto out_err; - } - - if (priv->stations[sta_id].used & IWL_STA_LOCAL) { - kfree(priv->stations[sta_id].lq); - priv->stations[sta_id].lq = NULL; - } - - priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; - - priv->num_stations--; - - BUG_ON(priv->num_stations < 0); - - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return iwl_legacy_send_remove_station(priv, addr, sta_id, false); -out_err: - spin_unlock_irqrestore(&priv->sta_lock, flags); - return -EINVAL; -} -EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); - -/** - * iwl_legacy_clear_ucode_stations - clear ucode station table bits - * - * This function clears all the bits in the driver indicating - * which stations are active in the ucode. Call when something - * other than explicit station management would cause this in - * the ucode, e.g. unassociated RXON. - */ -void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int i; - unsigned long flags_spin; - bool cleared = false; - - IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); - - spin_lock_irqsave(&priv->sta_lock, flags_spin); - for (i = 0; i < priv->hw_params.max_stations; i++) { - if (ctx && ctx->ctxid != priv->stations[i].ctxid) - continue; - - if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { - IWL_DEBUG_INFO(priv, - "Clearing ucode active for station %d\n", i); - priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; - cleared = true; - } - } - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - - if (!cleared) - IWL_DEBUG_INFO(priv, - "No active stations found to be cleared\n"); -} -EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); - -/** - * iwl_legacy_restore_stations() - Restore driver known stations to device - * - * All stations considered active by driver, but not present in ucode, is - * restored. - * - * Function sleeps. 
- */ -void -iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - struct iwl_legacy_addsta_cmd sta_cmd; - struct iwl_link_quality_cmd lq; - unsigned long flags_spin; - int i; - bool found = false; - int ret; - bool send_lq; - - if (!iwl_legacy_is_ready(priv)) { - IWL_DEBUG_INFO(priv, - "Not ready yet, not restoring any stations.\n"); - return; - } - - IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); - spin_lock_irqsave(&priv->sta_lock, flags_spin); - for (i = 0; i < priv->hw_params.max_stations; i++) { - if (ctx->ctxid != priv->stations[i].ctxid) - continue; - if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && - !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { - IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", - priv->stations[i].sta.sta.addr); - priv->stations[i].sta.mode = 0; - priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; - found = true; - } - } - - for (i = 0; i < priv->hw_params.max_stations; i++) { - if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { - memcpy(&sta_cmd, &priv->stations[i].sta, - sizeof(struct iwl_legacy_addsta_cmd)); - send_lq = false; - if (priv->stations[i].lq) { - memcpy(&lq, priv->stations[i].lq, - sizeof(struct iwl_link_quality_cmd)); - send_lq = true; - } - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); - if (ret) { - spin_lock_irqsave(&priv->sta_lock, flags_spin); - IWL_ERR(priv, "Adding station %pM failed.\n", - priv->stations[i].sta.sta.addr); - priv->stations[i].used &= - ~IWL_STA_DRIVER_ACTIVE; - priv->stations[i].used &= - ~IWL_STA_UCODE_INPROGRESS; - spin_unlock_irqrestore(&priv->sta_lock, - flags_spin); - } - /* - * Rate scaling has already been initialized, send - * current LQ command - */ - if (send_lq) - iwl_legacy_send_lq_cmd(priv, ctx, &lq, - CMD_SYNC, true); - spin_lock_irqsave(&priv->sta_lock, flags_spin); - priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; - } - } - - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - if (!found) - IWL_DEBUG_INFO(priv, "Restoring all known stations" - " .... no stations to be restored.\n"); - else - IWL_DEBUG_INFO(priv, "Restoring all known stations" - " .... 
complete.\n"); -} -EXPORT_SYMBOL(iwl_legacy_restore_stations); - -int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) -{ - int i; - - for (i = 0; i < priv->sta_key_max_num; i++) - if (!test_and_set_bit(i, &priv->ucode_key_table)) - return i; - - return WEP_INVALID_OFFSET; -} -EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); - -void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&priv->sta_lock, flags); - for (i = 0; i < priv->hw_params.max_stations; i++) { - if (!(priv->stations[i].used & IWL_STA_BCAST)) - continue; - - priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; - priv->num_stations--; - BUG_ON(priv->num_stations < 0); - kfree(priv->stations[i].lq); - priv->stations[i].lq = NULL; - } - spin_unlock_irqrestore(&priv->sta_lock, flags); -} -EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, - struct iwl_link_quality_cmd *lq) -{ - int i; - IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); - IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", - lq->general_params.single_stream_ant_msk, - lq->general_params.dual_stream_ant_msk); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", - i, lq->rs_table[i].rate_n_flags); -} -#else -static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, - struct iwl_link_quality_cmd *lq) -{ -} -#endif - -/** - * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity - * - * It sometimes happens when a HT rate has been in use and we - * loose connectivity with AP then mac80211 will first tell us that the - * current channel is not HT anymore before removing the station. In such a - * scenario the RXON flags will be updated to indicate we are not - * communicating HT anymore, but the LQ command may still contain HT rates. - * Test for this to prevent driver from sending LQ command between the time - * RXON flags are updated and when LQ command is updated. - */ -static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_link_quality_cmd *lq) -{ - int i; - - if (ctx->ht.enabled) - return true; - - IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", - ctx->active.channel); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & - RATE_MCS_HT_MSK) { - IWL_DEBUG_INFO(priv, - "index %d of LQ expects HT channel\n", - i); - return false; - } - } - return true; -} - -/** - * iwl_legacy_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. - * - * The link quality command is sent as the last step of station creation. - * This is the special case in which init is set and we call a callback in - * this case to clear the state indicating that station creation is in - * progress. 
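/*
 * Illustrative sketch, not driver code: iwl_legacy_is_lq_table_valid() above
 * guards against a race where, after HT connectivity is lost, the RXON flags
 * already say "no HT" while mac80211 has not yet removed the station, so a
 * stale link-quality table may still contain HT rates.  The check below
 * models that validation; the HT flag bit and table size are assumptions
 * made up for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define RATE_MCS_HT_FLAG	0x0100
#define LQ_MAX_RETRY		16

static bool lq_table_valid(bool ht_enabled,
			   const unsigned rate_n_flags[LQ_MAX_RETRY])
{
	int i;

	if (ht_enabled)
		return true;		/* HT rates are fine on an HT channel */

	for (i = 0; i < LQ_MAX_RETRY; i++)
		if (rate_n_flags[i] & RATE_MCS_HT_FLAG)
			return false;	/* stale HT entry on a non-HT channel */

	return true;
}

int main(void)
{
	unsigned legacy_only[LQ_MAX_RETRY] = { 0x0001, 0x0002 };
	unsigned with_ht[LQ_MAX_RETRY]     = { 0x0001, RATE_MCS_HT_FLAG | 0x7 };

	printf("legacy table, HT off: %d\n", lq_table_valid(false, legacy_only));
	printf("HT entry,     HT off: %d\n", lq_table_valid(false, with_ht));
	return 0;
}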
- */ -int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct iwl_link_quality_cmd *lq, u8 flags, bool init) -{ - int ret = 0; - unsigned long flags_spin; - - struct iwl_host_cmd cmd = { - .id = REPLY_TX_LINK_QUALITY_CMD, - .len = sizeof(struct iwl_link_quality_cmd), - .flags = flags, - .data = lq, - }; - - if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) - return -EINVAL; - - - spin_lock_irqsave(&priv->sta_lock, flags_spin); - if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - return -EINVAL; - } - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - - iwl_legacy_dump_lq_cmd(priv, lq); - BUG_ON(init && (cmd.flags & CMD_ASYNC)); - - if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) - ret = iwl_legacy_send_cmd(priv, &cmd); - else - ret = -EINVAL; - - if (cmd.flags & CMD_ASYNC) - return ret; - - if (init) { - IWL_DEBUG_INFO(priv, "init LQ command complete," - " clearing sta addition status for sta %d\n", - lq->sta_id); - spin_lock_irqsave(&priv->sta_lock, flags_spin); - priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; - spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - } - return ret; -} -EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); - -int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; - int ret; - - IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", - sta->addr); - mutex_lock(&priv->mutex); - IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", - sta->addr); - ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); - if (ret) - IWL_ERR(priv, "Error removing station %pM\n", - sta->addr); - mutex_unlock(&priv->mutex); - return ret; -} -EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h deleted file mode 100644 index 67bd75fe01a..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-sta.h +++ /dev/null @@ -1,148 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#ifndef __iwl_legacy_sta_h__ -#define __iwl_legacy_sta_h__ - -#include "iwl-dev.h" - -#define HW_KEY_DYNAMIC 0 -#define HW_KEY_DEFAULT 1 - -#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ -#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ -#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of - being activated */ -#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211; - (this is for the IBSS BSSID stations) */ -#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ - - -void iwl_legacy_restore_stations(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv); -int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv); -int iwl_legacy_send_add_sta(struct iwl_priv *priv, - struct iwl_legacy_addsta_cmd *sta, u8 flags); -int iwl_legacy_add_station_common(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, - struct ieee80211_sta *sta, u8 *sta_id_r); -int iwl_legacy_remove_station(struct iwl_priv *priv, - const u8 sta_id, - const u8 *addr); -int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); - -u8 iwl_legacy_prep_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, - struct ieee80211_sta *sta); - -int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_link_quality_cmd *lq, - u8 flags, bool init); - -/** - * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver - * @priv: iwl priv struct - * - * This is called during iwl_down() to make sure that in the case - * we're coming there from a hardware restart mac80211 will be - * able to reconfigure stations -- if we're getting there in the - * normal down flow then the stations will already be cleared. - */ -static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv) -{ - unsigned long flags; - struct iwl_rxon_context *ctx; - - spin_lock_irqsave(&priv->sta_lock, flags); - memset(priv->stations, 0, sizeof(priv->stations)); - priv->num_stations = 0; - - priv->ucode_key_table = 0; - - for_each_context(priv, ctx) { - /* - * Remove all key information that is not stored as part - * of station information since mac80211 may not have had - * a chance to remove all the keys. When device is - * reconfigured by mac80211 after an error all keys will - * be reconfigured. - */ - memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); - ctx->key_mapping_keys = 0; - } - - spin_unlock_irqrestore(&priv->sta_lock, flags); -} - -static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta) -{ - if (WARN_ON(!sta)) - return IWL_INVALID_STATION; - - return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id; -} - -/** - * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta - * @priv: iwl priv - * @context: the current context - * @sta: mac80211 station - * - * In certain circumstances mac80211 passes a station pointer - * that may be %NULL, for example during TX or key setup. In - * that case, we need to use the broadcast station, so this - * inline wraps that pattern. 
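/*
 * Illustrative sketch, not driver code: the station flags in the iwl-sta.h
 * header removed above track each entry through a small life cycle.  The
 * driver marks an entry DRIVER_ACTIVE when it prepares it, UCODE_INPROGRESS
 * while the ADD_STA command is in flight, and UCODE_ACTIVE once the firmware
 * acknowledges it; removal clears the bits again.  The standalone model
 * below walks one entry through that cycle; the names mirror the header but
 * the flow is a simplification.
 */
#include <stdio.h>

#define STA_DRIVER_ACTIVE	(1u << 0)
#define STA_UCODE_ACTIVE	(1u << 1)
#define STA_UCODE_INPROGRESS	(1u << 2)

static void show(const char *when, unsigned used)
{
	printf("%-16s drv=%u inprog=%u ucode=%u\n", when,
	       !!(used & STA_DRIVER_ACTIVE),
	       !!(used & STA_UCODE_INPROGRESS),
	       !!(used & STA_UCODE_ACTIVE));
}

int main(void)
{
	unsigned used = 0;

	used |= STA_DRIVER_ACTIVE;		/* entry prepared by the driver */
	show("prepared:", used);

	used |= STA_UCODE_INPROGRESS;		/* ADD_STA sent, awaiting reply */
	show("add in flight:", used);

	used |= STA_UCODE_ACTIVE;		/* firmware acknowledged the add */
	used &= ~STA_UCODE_INPROGRESS;
	show("acknowledged:", used);

	used &= ~(STA_UCODE_ACTIVE | STA_DRIVER_ACTIVE);	/* removed */
	show("removed:", used);
	return 0;
}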
- */ -static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv, - struct iwl_rxon_context *context, - struct ieee80211_sta *sta) -{ - int sta_id; - - if (!sta) - return context->bcast_sta_id; - - sta_id = iwl_legacy_sta_id(sta); - - /* - * mac80211 should not be passing a partially - * initialised station! - */ - WARN_ON(sta_id == IWL_INVALID_STATION); - - return sta_id; -} -#endif /* __iwl_legacy_sta_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c deleted file mode 100644 index c0dfb1a4e96..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ /dev/null @@ -1,659 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/etherdevice.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/export.h> -#include <net/mac80211.h> -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-sta.h" -#include "iwl-io.h" -#include "iwl-helpers.h" - -/** - * iwl_legacy_txq_update_write_ptr - Send new write index to hardware - */ -void -iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) -{ - u32 reg = 0; - int txq_id = txq->q.id; - - if (txq->need_update == 0) - return; - - /* if we're trying to save power */ - if (test_bit(STATUS_POWER_PMI, &priv->status)) { - /* wake up nic if it's powered down ... - * uCode will wake up, and interrupt us again, so next - * time we'll skip this part. */ - reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - IWL_DEBUG_INFO(priv, - "Tx queue %d requesting wakeup," - " GP1 = 0x%x\n", txq_id, reg); - iwl_legacy_set_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - return; - } - - iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, - txq->q.write_ptr | (txq_id << 8)); - - /* - * else not in power-save mode, - * uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). 
- */ - } else - iwl_write32(priv, HBUS_TARG_WRPTR, - txq->q.write_ptr | (txq_id << 8)); - txq->need_update = 0; -} -EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr); - -/** - * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's - */ -void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct iwl_queue *q = &txq->q; - - if (q->n_bd == 0) - return; - - while (q->write_ptr != q->read_ptr) { - priv->cfg->ops->lib->txq_free_tfd(priv, txq); - q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); - } -} -EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap); - -/** - * iwl_legacy_tx_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct device *dev = &priv->pci_dev->dev; - int i; - - iwl_legacy_tx_queue_unmap(priv, txq_id); - - /* De-alloc array of command/tx buffers */ - for (i = 0; i < TFD_TX_CMD_SLOTS; i++) - kfree(txq->cmd[i]); - - /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) - dma_free_coherent(dev, priv->hw_params.tfd_size * - txq->q.n_bd, txq->tfds, txq->q.dma_addr); - - /* De-alloc array of per-TFD driver data */ - kfree(txq->txb); - txq->txb = NULL; - - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} -EXPORT_SYMBOL(iwl_legacy_tx_queue_free); - -/** - * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue - */ -void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv) -{ - struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; - struct iwl_queue *q = &txq->q; - int i; - - if (q->n_bd == 0) - return; - - while (q->read_ptr != q->write_ptr) { - i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0); - - if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(&txq->meta[i], mapping), - dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); - txq->meta[i].flags = 0; - } - - q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); - } - - i = q->n_window; - if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(&txq->meta[i], mapping), - dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); - txq->meta[i].flags = 0; - } -} -EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap); - -/** - * iwl_legacy_cmd_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. 
- */ -void iwl_legacy_cmd_queue_free(struct iwl_priv *priv) -{ - struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; - struct device *dev = &priv->pci_dev->dev; - int i; - - iwl_legacy_cmd_queue_unmap(priv); - - /* De-alloc array of command/tx buffers */ - for (i = 0; i <= TFD_CMD_SLOTS; i++) - kfree(txq->cmd[i]); - - /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) - dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd, - txq->tfds, txq->q.dma_addr); - - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} -EXPORT_SYMBOL(iwl_legacy_cmd_queue_free); - -/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** - * DMA services - * - * Theory of operation - * - * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer - * of buffer descriptors, each of which points to one or more data buffers for - * the device to read from or fill. Driver and device exchange status of each - * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty - * entries in each circular buffer, to protect against confusing empty and full - * queue states. - * - * The device reads or writes the data in the queues via the device's several - * DMA/FIFO channels. Each queue is mapped to a single DMA channel. - * - * For Tx queue, there are low mark and high mark limits. If, after queuing - * the packet for Tx, free space become < low mark, Tx queue stopped. When - * reclaiming packets (on 'tx done IRQ), if free space become > high mark, - * Tx queue resumed. - * - * See more detailed info in iwl-4965-hw.h. - ***************************************************/ - -int iwl_legacy_queue_space(const struct iwl_queue *q) -{ - int s = q->read_ptr - q->write_ptr; - - if (q->read_ptr > q->write_ptr) - s -= q->n_bd; - - if (s <= 0) - s += q->n_window; - /* keep some reserve to not confuse empty and full situations */ - s -= 2; - if (s < 0) - s = 0; - return s; -} -EXPORT_SYMBOL(iwl_legacy_queue_space); - - -/** - * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes - */ -static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q, - int count, int slots_num, u32 id) -{ - q->n_bd = count; - q->n_window = slots_num; - q->id = id; - - /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap - * and iwl_legacy_queue_dec_wrap are broken. */ - BUG_ON(!is_power_of_2(count)); - - /* slots_num must be power-of-two size, otherwise - * iwl_legacy_get_cmd_index is broken. */ - BUG_ON(!is_power_of_2(slots_num)); - - q->low_mark = q->n_window / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_window / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = q->read_ptr = 0; - - return 0; -} - -/** - * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue - */ -static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv, - struct iwl_tx_queue *txq, u32 id) -{ - struct device *dev = &priv->pci_dev->dev; - size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; - - /* Driver private data, only for Tx (not command) queues, - * not shared with device. 
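As an illustrative aside (not part of the driver code above or of this diff), the ring arithmetic described in the DMA-queue theory-of-operation comment can be exercised in isolation. The sketch below assumes a 256-entry power-of-two ring and keeps the same two-entry reserve so a full ring is never mistaken for an empty one; it deliberately simplifies iwl_legacy_queue_space(), which additionally distinguishes n_bd from n_window.

/* Illustrative sketch only: power-of-two ring with read/write pointers
 * and a two-entry reserve, as described in the comment above. */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256			/* must be a power of two */

/* wrap an index, relying on RING_SIZE being a power of two */
static int ring_inc_wrap(int index)
{
	return (index + 1) & (RING_SIZE - 1);
}

/* free descriptors, keeping a two-entry reserve so full != empty */
static int ring_space(int read_ptr, int write_ptr)
{
	int s = read_ptr - write_ptr;

	if (s <= 0)
		s += RING_SIZE;
	s -= 2;
	return s < 0 ? 0 : s;
}

int main(void)
{
	int read_ptr = 0, write_ptr = 0;

	assert(ring_space(read_ptr, write_ptr) == RING_SIZE - 2);	/* empty ring */
	write_ptr = ring_inc_wrap(write_ptr);				/* queue one entry */
	assert(ring_space(read_ptr, write_ptr) == RING_SIZE - 3);
	printf("space after one enqueue: %d\n", ring_space(read_ptr, write_ptr));
	return 0;
}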
*/ - if (id != priv->cmd_queue) { - txq->txb = kzalloc(sizeof(txq->txb[0]) * - TFD_QUEUE_SIZE_MAX, GFP_KERNEL); - if (!txq->txb) { - IWL_ERR(priv, "kmalloc for auxiliary BD " - "structures failed\n"); - goto error; - } - } else { - txq->txb = NULL; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, - GFP_KERNEL); - if (!txq->tfds) { - IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); - goto error; - } - txq->q.id = id; - - return 0; - - error: - kfree(txq->txb); - txq->txb = NULL; - - return -ENOMEM; -} - -/** - * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue - */ -int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id) -{ - int i, len; - int ret; - int actual_slots = slots_num; - - /* - * Alloc buffer array for commands (Tx or other types of commands). - * For the command queue (#4/#9), allocate command space + one big - * command for scan, since scan command is very huge; the system will - * not have two scans at the same time, so only one is needed. - * For normal Tx queues (all other queues), no super-size command - * space is needed. - */ - if (txq_id == priv->cmd_queue) - actual_slots++; - - txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, - GFP_KERNEL); - txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, - GFP_KERNEL); - - if (!txq->meta || !txq->cmd) - goto out_free_arrays; - - len = sizeof(struct iwl_device_cmd); - for (i = 0; i < actual_slots; i++) { - /* only happens for cmd queue */ - if (i == slots_num) - len = IWL_MAX_CMD_SIZE; - - txq->cmd[i] = kmalloc(len, GFP_KERNEL); - if (!txq->cmd[i]) - goto err; - } - - /* Alloc driver data array and TFD circular buffer */ - ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id); - if (ret) - goto err; - - txq->need_update = 0; - - /* - * For the default queues 0-3, set up the swq_id - * already -- all others need to get one later - * (if they need one at all). - */ - if (txq_id < 4) - iwl_legacy_set_swq_id(txq, txq_id, txq_id); - - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise - * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. 
*/ - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - iwl_legacy_queue_init(priv, &txq->q, - TFD_QUEUE_SIZE_MAX, slots_num, txq_id); - - /* Tell device where to find queue */ - priv->cfg->ops->lib->txq_init(priv, txq); - - return 0; -err: - for (i = 0; i < actual_slots; i++) - kfree(txq->cmd[i]); -out_free_arrays: - kfree(txq->meta); - kfree(txq->cmd); - - return -ENOMEM; -} -EXPORT_SYMBOL(iwl_legacy_tx_queue_init); - -void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id) -{ - int actual_slots = slots_num; - - if (txq_id == priv->cmd_queue) - actual_slots++; - - memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); - - txq->need_update = 0; - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - iwl_legacy_queue_init(priv, &txq->q, - TFD_QUEUE_SIZE_MAX, slots_num, txq_id); - - /* Tell device where to find queue */ - priv->cfg->ops->lib->txq_init(priv, txq); -} -EXPORT_SYMBOL(iwl_legacy_tx_queue_reset); - -/*************** HOST COMMAND QUEUE FUNCTIONS *****/ - -/** - * iwl_legacy_enqueue_hcmd - enqueue a uCode command - * @priv: device private data point - * @cmd: a point to the ucode command structure - * - * The function returns < 0 values to indicate the operation is - * failed. On success, it turns the index (> 0) of command in the - * command queue. - */ -int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; - struct iwl_queue *q = &txq->q; - struct iwl_device_cmd *out_cmd; - struct iwl_cmd_meta *out_meta; - dma_addr_t phys_addr; - unsigned long flags; - int len; - u32 idx; - u16 fix_size; - - cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); - fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); - - /* If any of the command structures end up being larger than - * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then - * we will need to increase the size of the TFD entries - * Also, check to see if command buffer should not exceed the size - * of device_cmd and max_cmd_size. */ - BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && - !(cmd->flags & CMD_SIZE_HUGE)); - BUG_ON(fix_size > IWL_MAX_CMD_SIZE); - - if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) { - IWL_WARN(priv, "Not sending command - %s KILL\n", - iwl_legacy_is_rfkill(priv) ? "RF" : "CT"); - return -EIO; - } - - spin_lock_irqsave(&priv->hcmd_lock, flags); - - if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { - spin_unlock_irqrestore(&priv->hcmd_lock, flags); - - IWL_ERR(priv, "Restarting adapter due to command queue full\n"); - queue_work(priv->workqueue, &priv->restart); - return -ENOSPC; - } - - idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); - out_cmd = txq->cmd[idx]; - out_meta = &txq->meta[idx]; - - if (WARN_ON(out_meta->flags & CMD_MAPPED)) { - spin_unlock_irqrestore(&priv->hcmd_lock, flags); - return -ENOSPC; - } - - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ - out_meta->flags = cmd->flags | CMD_MAPPED; - if (cmd->flags & CMD_WANT_SKB) - out_meta->source = cmd; - if (cmd->flags & CMD_ASYNC) - out_meta->callback = cmd->callback; - - out_cmd->hdr.cmd = cmd->id; - memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); - - /* At this point, the out_cmd now has all of the incoming cmd - * information */ - - out_cmd->hdr.flags = 0; - out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); - if (cmd->flags & CMD_SIZE_HUGE) - out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; - len = sizeof(struct iwl_device_cmd); - if (idx == TFD_CMD_SLOTS) - len = IWL_MAX_CMD_SIZE; - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - switch (out_cmd->hdr.cmd) { - case REPLY_TX_LINK_QUALITY_CMD: - case SENSITIVITY_CMD: - IWL_DEBUG_HC_DUMP(priv, - "Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), - out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), fix_size, - q->write_ptr, idx, priv->cmd_queue); - break; - default: - IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), - out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), fix_size, - q->write_ptr, idx, priv->cmd_queue); - } -#endif - txq->need_update = 1; - - if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) - /* Set up entry in queue's byte count circular buffer */ - priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); - - phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, - fix_size, PCI_DMA_BIDIRECTIONAL); - dma_unmap_addr_set(out_meta, mapping, phys_addr); - dma_unmap_len_set(out_meta, len, fix_size); - - trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr, - fix_size, cmd->flags); - - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - phys_addr, fix_size, 1, - U32_PAD(cmd->len)); - - /* Increment and update queue's write index */ - q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); - iwl_legacy_txq_update_write_ptr(priv, txq); - - spin_unlock_irqrestore(&priv->hcmd_lock, flags); - return idx; -} - -/** - * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd - * - * When FW advances 'R' index, all entries between old and new 'R' index - * need to be reclaimed. As result, some free space forms. If there is - * enough free space (> low mark), wake the stack that feeds us. 
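For illustration only: the enqueue path above packs the command queue id and ring index into the 16-bit sequence field with QUEUE_TO_SEQ() and INDEX_TO_SEQ(), and the completion path below recovers them with SEQ_TO_QUEUE() and SEQ_TO_INDEX(). The standalone sketch assumes a layout with the queue id in bits 8..12 and the ring index in the low byte; treat that layout as an assumption of the example, not a statement about the driver's exact macros.

/* Illustrative sketch only: the bit layout is assumed for the example. */
#include <assert.h>
#include <stdint.h>

/* pack: queue id in bits 8..12, ring index in bits 0..7 (assumed layout) */
static uint16_t seq_pack(unsigned int txq_id, unsigned int write_ptr)
{
	return (uint16_t)(((txq_id & 0x1f) << 8) | (write_ptr & 0xff));
}

static unsigned int seq_to_queue(uint16_t seq)
{
	return (seq >> 8) & 0x1f;
}

static unsigned int seq_to_index(uint16_t seq)
{
	return seq & 0xff;
}

int main(void)
{
	/* hypothetical values: command queue 4, write pointer 37 */
	uint16_t seq = seq_pack(4, 37);

	assert(seq_to_queue(seq) == 4);
	assert(seq_to_index(seq) == 37);
	return 0;
}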
- */ -static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, - int idx, int cmd_idx) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct iwl_queue *q = &txq->q; - int nfreed = 0; - - if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " - "is out of range [0-%d] %d %d.\n", txq_id, - idx, q->n_bd, q->write_ptr, q->read_ptr); - return; - } - - for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; - q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { - - if (nfreed++ > 0) { - IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, - q->write_ptr, q->read_ptr); - queue_work(priv->workqueue, &priv->restart); - } - - } -} - -/** - * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them - * @rxb: Rx buffer to reclaim - * - * If an Rx buffer has an async callback associated with it the callback - * will be executed. The attached skb (if present) will only be freed - * if the callback returns 1 - */ -void -iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int index = SEQ_TO_INDEX(sequence); - int cmd_index; - bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); - struct iwl_device_cmd *cmd; - struct iwl_cmd_meta *meta; - struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; - unsigned long flags; - - /* If a Tx command is being handled and it isn't in the actual - * command queue then there a command routing bug has been introduced - * in the queue management code. */ - if (WARN(txq_id != priv->cmd_queue, - "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, priv->cmd_queue, sequence, - priv->txq[priv->cmd_queue].q.read_ptr, - priv->txq[priv->cmd_queue].q.write_ptr)) { - iwl_print_hex_error(priv, pkt, 32); - return; - } - - cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge); - cmd = txq->cmd[cmd_index]; - meta = &txq->meta[cmd_index]; - - txq->time_stamp = jiffies; - - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), - PCI_DMA_BIDIRECTIONAL); - - /* Input error checking is done when commands are added to queue. */ - if (meta->flags & CMD_WANT_SKB) { - meta->source->reply_page = (unsigned long)rxb_addr(rxb); - rxb->page = NULL; - } else if (meta->callback) - meta->callback(priv, cmd, pkt); - - spin_lock_irqsave(&priv->hcmd_lock, flags); - - iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); - - if (!(meta->flags & CMD_ASYNC)) { - clear_bit(STATUS_HCMD_ACTIVE, &priv->status); - IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", - iwl_legacy_get_cmd_string(cmd->hdr.cmd)); - wake_up(&priv->wait_command_queue); - } - - /* Mark as unmapped */ - meta->flags = 0; - - spin_unlock_irqrestore(&priv->hcmd_lock, flags); -} -EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete); diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c deleted file mode 100644 index b282d869a54..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ /dev/null @@ -1,4016 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
- * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/pci-aspm.h> -#include <linux/slab.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/firmware.h> -#include <linux/etherdevice.h> -#include <linux/if_arp.h> - -#include <net/ieee80211_radiotap.h> -#include <net/mac80211.h> - -#include <asm/div64.h> - -#define DRV_NAME "iwl3945" - -#include "iwl-fh.h" -#include "iwl-3945-fh.h" -#include "iwl-commands.h" -#include "iwl-sta.h" -#include "iwl-3945.h" -#include "iwl-core.h" -#include "iwl-helpers.h" -#include "iwl-dev.h" -#include "iwl-spectrum.h" - -/* - * module name, copyright, version, etc. - */ - -#define DRV_DESCRIPTION \ -"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -#define VD "d" -#else -#define VD -#endif - -/* - * add "s" to indicate spectrum measurement included. - * we add it here to be consistent with previous releases in which - * this was configurable. 
- */ -#define DRV_VERSION IWLWIFI_VERSION VD "s" -#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" -#define DRV_AUTHOR "<ilw@linux.intel.com>" - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - - /* module parameters */ -struct iwl_mod_params iwl3945_mod_params = { - .sw_crypto = 1, - .restart_fw = 1, - .disable_hw_scan = 1, - /* the rest are 0 by default */ -}; - -/** - * iwl3945_get_antenna_flags - Get antenna flags for RXON command - * @priv: eeprom and antenna fields are used to determine antenna flags - * - * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed - * iwl3945_mod_params.antenna specifies the antenna diversity mode: - * - * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself - * IWL_ANTENNA_MAIN - Force MAIN antenna - * IWL_ANTENNA_AUX - Force AUX antenna - */ -__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) -{ - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - - switch (iwl3945_mod_params.antenna) { - case IWL_ANTENNA_DIVERSITY: - return 0; - - case IWL_ANTENNA_MAIN: - if (eeprom->antenna_switch_type) - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; - - case IWL_ANTENNA_AUX: - if (eeprom->antenna_switch_type) - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; - } - - /* bad antenna selector value */ - IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", - iwl3945_mod_params.antenna); - - return 0; /* "diversity" is default if error */ -} - -static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - int ret; - - key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - - if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->hw_key_idx = keyconf->keyidx; - key_flags &= ~STA_KEY_FLG_INVALID; - - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; - memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, - keyconf->keylen); - - memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, - keyconf->keylen); - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_legacy_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. 
*/ - - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - priv->stations[sta_id].sta.key.key_flags = key_flags; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); - - ret = iwl_legacy_send_add_sta(priv, - &priv->stations[sta_id].sta, CMD_ASYNC); - - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return ret; -} - -static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - return -EOPNOTSUPP; -} - -static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - return -EOPNOTSUPP; -} - -static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) -{ - unsigned long flags; - struct iwl_legacy_addsta_cmd sta_cmd; - - spin_lock_irqsave(&priv->sta_lock, flags); - memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); - memset(&priv->stations[sta_id].sta.key, 0, - sizeof(struct iwl4965_keyinfo)); - priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); - return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} - -static int iwl3945_set_dynamic_key(struct iwl_priv *priv, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - int ret = 0; - - keyconf->hw_key_idx = HW_KEY_DYNAMIC; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_TKIP: - ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); - break; - default: - IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__, - keyconf->cipher); - ret = -EINVAL; - } - - IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", - keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta_id, ret); - - return ret; -} - -static int iwl3945_remove_static_key(struct iwl_priv *priv) -{ - int ret = -EOPNOTSUPP; - - return ret; -} - -static int iwl3945_set_static_key(struct iwl_priv *priv, - struct ieee80211_key_conf *key) -{ - if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) - return -EOPNOTSUPP; - - IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher); - return -EINVAL; -} - -static void iwl3945_clear_free_frames(struct iwl_priv *priv) -{ - struct list_head *element; - - IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", - priv->frames_count); - - while (!list_empty(&priv->free_frames)) { - element = priv->free_frames.next; - list_del(element); - kfree(list_entry(element, struct iwl3945_frame, list)); - priv->frames_count--; - } - - if (priv->frames_count) { - IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", - priv->frames_count); - priv->frames_count = 0; - } -} - -static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) -{ - struct iwl3945_frame *frame; - struct list_head *element; - if (list_empty(&priv->free_frames)) { - frame = kzalloc(sizeof(*frame), GFP_KERNEL); - if (!frame) { - IWL_ERR(priv, "Could not allocate frame!\n"); - return NULL; - } - - priv->frames_count++; - return frame; - } - - element = priv->free_frames.next; - list_del(element); - return list_entry(element, struct iwl3945_frame, list); -} - -static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) -{ - memset(frame, 0, sizeof(*frame)); - list_add(&frame->list, &priv->free_frames); -} - -unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - int left) -{ - - if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) - return 0; - - if (priv->beacon_skb->len > left) - return 0; - - memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); - - return priv->beacon_skb->len; -} - -static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) -{ - struct iwl3945_frame *frame; - unsigned int frame_size; - int rc; - u8 rate; - - frame = iwl3945_get_free_frame(priv); - - if (!frame) { - IWL_ERR(priv, "Could not obtain free frame buffer for beacon " - "command.\n"); - return -ENOMEM; - } - - rate = iwl_legacy_get_lowest_plcp(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - - frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); - - rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, - &frame->u.cmd[0]); - - iwl3945_free_frame(priv, frame); - - return rc; -} - -static void iwl3945_unset_hw_params(struct iwl_priv *priv) -{ - if (priv->_3945.shared_virt) - dma_free_coherent(&priv->pci_dev->dev, - sizeof(struct iwl3945_shared), - priv->_3945.shared_virt, - priv->_3945.shared_phys); -} - -static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - struct iwl_device_cmd *cmd, - struct sk_buff *skb_frag, - int sta_id) -{ - struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; - struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; - - tx_cmd->sec_ctl = 0; - - switch (keyinfo->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); - IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); - break; - - case WLAN_CIPHER_SUITE_TKIP: - break; - - case WLAN_CIPHER_SUITE_WEP104: - tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | - (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; - - memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); - - IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " - "with key %d\n", info->control.hw_key->hw_key_idx); - break; - - default: - IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher); - break; - } -} - -/* - * handle build REPLY_TX command notification. 
- */ -static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, u8 std_id) -{ - struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; - __le32 tx_flags = tx_cmd->tx_flags; - __le16 fc = hdr->frame_control; - - tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - tx_flags |= TX_CMD_FLG_ACK_MSK; - if (ieee80211_is_mgmt(fc)) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - if (ieee80211_is_probe_resp(fc) && - !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) - tx_flags |= TX_CMD_FLG_TSF_MSK; - } else { - tx_flags &= (~TX_CMD_FLG_ACK_MSK); - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - tx_cmd->sta_id = std_id; - if (ieee80211_has_morefrags(fc)) - tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tx_cmd->tid_tspec = qc[0] & 0xf; - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; - } else { - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); - - tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); - if (ieee80211_is_mgmt(fc)) { - if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); - else - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); - } else { - tx_cmd->timeout.pm_frame_timeout = 0; - } - - tx_cmd->driver_txop = 0; - tx_cmd->tx_flags = tx_flags; - tx_cmd->next_frame_len = 0; -} - -/* - * start REPLY_TX command process - */ -static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl3945_tx_cmd *tx_cmd; - struct iwl_tx_queue *txq = NULL; - struct iwl_queue *q = NULL; - struct iwl_device_cmd *out_cmd; - struct iwl_cmd_meta *out_meta; - dma_addr_t phys_addr; - dma_addr_t txcmd_phys; - int txq_id = skb_get_queue_mapping(skb); - u16 len, idx, hdr_len; - u8 id; - u8 unicast; - u8 sta_id; - u8 tid = 0; - __le16 fc; - u8 wait_write_ptr = 0; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - if (iwl_legacy_is_rfkill(priv)) { - IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); - goto drop_unlock; - } - - if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { - IWL_ERR(priv, "ERROR: No TX rate available.\n"); - goto drop_unlock; - } - - unicast = !is_multicast_ether_addr(hdr->addr1); - id = 0; - - fc = hdr->frame_control; - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (ieee80211_is_auth(fc)) - IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); - else if (ieee80211_is_assoc_req(fc)) - IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); - else if (ieee80211_is_reassoc_req(fc)) - IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); -#endif - - spin_unlock_irqrestore(&priv->lock, flags); - - hdr_len = ieee80211_hdrlen(fc); - - /* Find index into station table for destination station */ - sta_id = iwl_legacy_sta_id_or_broadcast( - priv, &priv->contexts[IWL_RXON_CTX_BSS], - info->control.sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", - hdr->addr1); - goto drop; - } - - IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (unlikely(tid >= MAX_TID_COUNT)) - goto drop; - } - - /* Descriptor for chosen Tx queue */ - txq = &priv->txq[txq_id]; - q = &txq->q; - - if 
((iwl_legacy_queue_space(q) < q->high_mark)) - goto drop; - - spin_lock_irqsave(&priv->lock, flags); - - idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); - - /* Set up driver data for this TFD */ - memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); - txq->txb[q->write_ptr].skb = skb; - txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - /* Init first empty entry in queue's array of Tx/cmd buffers */ - out_cmd = txq->cmd[idx]; - out_meta = &txq->meta[idx]; - tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; - memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); - memset(tx_cmd, 0, sizeof(*tx_cmd)); - - /* - * Set up the Tx-command (not MAC!) header. - * Store the chosen Tx queue and TFD index within the sequence field; - * after Tx, uCode's Tx response will return this value so driver can - * locate the frame within the tx queue and do post-tx processing. - */ - out_cmd->hdr.cmd = REPLY_TX; - out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(q->write_ptr))); - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - - - if (info->control.hw_key) - iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); - - /* TODO need this for burst mode later on */ - iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); - - /* set is_hcca to 0; it probably will never be implemented */ - iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); - - /* Total # bytes to be transmitted */ - len = (u16)skb->len; - tx_cmd->len = cpu_to_le16(len); - - iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); - iwl_legacy_update_stats(priv, true, fc, len); - tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; - tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; - - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", - le16_to_cpu(out_cmd->hdr.sequence)); - IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, - ieee80211_hdrlen(fc)); - - /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. - */ - len = sizeof(struct iwl3945_tx_cmd) + - sizeof(struct iwl_cmd_header) + hdr_len; - len = (len + 3) & ~3; - - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. */ - txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, - len, PCI_DMA_TODEVICE); - /* we do not map meta data ... so we can safely access address to - * provide to unmap command*/ - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, len); - - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - txcmd_phys, len, 1, 0); - - - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). 
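The (len + 3) & ~3 rounding above pads the concatenated Tx command and MAC header up to the next dword boundary before DMA mapping. A minimal, self-contained example of the same arithmetic (input values chosen purely for illustration):

/* Illustrative only: round a byte count up to a 4-byte (dword) boundary. */
#include <assert.h>

static unsigned int dword_align(unsigned int len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	assert(dword_align(26) == 28);	/* 2 bytes of padding added */
	assert(dword_align(28) == 28);	/* already aligned, nothing added */
	return 0;
}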
*/ - len = skb->len - hdr_len; - if (len) { - phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, - len, PCI_DMA_TODEVICE); - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - phys_addr, len, - 0, U32_PAD(len)); - } - - - /* Tell device the write index *just past* this latest filled TFD */ - q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); - iwl_legacy_txq_update_write_ptr(priv, txq); - spin_unlock_irqrestore(&priv->lock, flags); - - if ((iwl_legacy_queue_space(q) < q->high_mark) - && priv->mac80211_registered) { - if (wait_write_ptr) { - spin_lock_irqsave(&priv->lock, flags); - txq->need_update = 1; - iwl_legacy_txq_update_write_ptr(priv, txq); - spin_unlock_irqrestore(&priv->lock, flags); - } - - iwl_legacy_stop_queue(priv, txq); - } - - return 0; - -drop_unlock: - spin_unlock_irqrestore(&priv->lock, flags); -drop: - return -1; -} - -static int iwl3945_get_measurement(struct iwl_priv *priv, - struct ieee80211_measurement_params *params, - u8 type) -{ - struct iwl_spectrum_cmd spectrum; - struct iwl_rx_packet *pkt; - struct iwl_host_cmd cmd = { - .id = REPLY_SPECTRUM_MEASUREMENT_CMD, - .data = (void *)&spectrum, - .flags = CMD_WANT_SKB, - }; - u32 add_time = le64_to_cpu(params->start_time); - int rc; - int spectrum_resp_status; - int duration = le16_to_cpu(params->duration); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) - add_time = iwl_legacy_usecs_to_beacons(priv, - le64_to_cpu(params->start_time) - priv->_3945.last_tsf, - le16_to_cpu(ctx->timing.beacon_interval)); - - memset(&spectrum, 0, sizeof(spectrum)); - - spectrum.channel_count = cpu_to_le16(1); - spectrum.flags = - RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; - spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; - cmd.len = sizeof(spectrum); - spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); - - if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) - spectrum.start_time = - iwl_legacy_add_beacon_time(priv, - priv->_3945.last_beacon_time, add_time, - le16_to_cpu(ctx->timing.beacon_interval)); - else - spectrum.start_time = 0; - - spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); - spectrum.channels[0].channel = params->channel; - spectrum.channels[0].type = type; - if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) - spectrum.flags |= RXON_FLG_BAND_24G_MSK | - RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; - - rc = iwl_legacy_send_cmd_sync(priv, &cmd); - if (rc) - return rc; - - pkt = (struct iwl_rx_packet *)cmd.reply_page; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); - rc = -EIO; - } - - spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); - switch (spectrum_resp_status) { - case 0: /* Command will be handled */ - if (pkt->u.spectrum.id != 0xff) { - IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", - pkt->u.spectrum.id); - priv->measurement_status &= ~MEASUREMENT_READY; - } - priv->measurement_status |= MEASUREMENT_ACTIVE; - rc = 0; - break; - - case 1: /* Command will not be handled */ - rc = -EAGAIN; - break; - } - - iwl_legacy_free_pages(priv, cmd.reply_page); - - return rc; -} - -static void iwl3945_rx_reply_alive(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_alive_resp *palive; - struct delayed_work *pwork; - - palive = &pkt->u.alive_frame; - - IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " - "0x%01X 
0x%01X\n", - palive->is_valid, palive->ver_type, - palive->ver_subtype); - - if (palive->ver_subtype == INITIALIZE_SUBTYPE) { - IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); - memcpy(&priv->card_alive_init, &pkt->u.alive_frame, - sizeof(struct iwl_alive_resp)); - pwork = &priv->init_alive_start; - } else { - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - memcpy(&priv->card_alive, &pkt->u.alive_frame, - sizeof(struct iwl_alive_resp)); - pwork = &priv->alive_start; - iwl3945_disable_events(priv); - } - - /* We delay the ALIVE response by 5ms to - * give the HW RF Kill time to activate... */ - if (palive->is_valid == UCODE_VALID_OK) - queue_delayed_work(priv->workqueue, pwork, - msecs_to_jiffies(5)); - else - IWL_WARN(priv, "uCode did not respond OK.\n"); -} - -static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); -#endif - - IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); -} - -static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - u8 rate = beacon->beacon_notify_hdr.rate; - - IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " - "tsf %d %d rate %d\n", - le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), - le32_to_cpu(beacon->low_tsf), rate); -#endif - - priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); - -} - -/* Handle notification from uCode that card's power state is changing - * due to software, hardware, or critical temperature RFKILL */ -static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); - unsigned long status = priv->status; - - IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On"); - - iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - if (flags & HW_CARD_DISABLED) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - - iwl_legacy_scan_cancel(priv); - - if ((test_bit(STATUS_RF_KILL_HW, &status) != - test_bit(STATUS_RF_KILL_HW, &priv->status))) - wiphy_rfkill_set_hw_state(priv->hw->wiphy, - test_bit(STATUS_RF_KILL_HW, &priv->status)); - else - wake_up(&priv->wait_command_queue); -} - -/** - * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks - * - * Setup the RX handlers for each of the reply types sent from the uCode - * to the host. - * - * This function chains into the hardware specific files for them to setup - * any hardware specific handlers as well. 
- */ -static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) -{ - priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; - priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; - priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; - priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; - priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = - iwl_legacy_rx_spectrum_measure_notif; - priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; - priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = - iwl_legacy_rx_pm_debug_statistics_notif; - priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; - - /* - * The same handler is used for both the REPLY to a discrete - * statistics request from the host as well as for the periodic - * statistics notifications (after received beacons) from the uCode. - */ - priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; - priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; - - iwl_legacy_setup_rx_scan_handlers(priv); - priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; - - /* Set up hardware specific Rx handlers */ - iwl3945_hw_rx_handler_setup(priv); -} - -/************************** RX-FUNCTIONS ****************************/ -/* - * Rx theory of operation - * - * The host allocates 32 DMA target addresses and passes the host address - * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is - * 0 to 31 - * - * Rx Queue Indexes - * The host/firmware share two index registers for managing the Rx buffers. - * - * The READ index maps to the first position that the firmware may be writing - * to -- the driver can read up to (but not including) this position and get - * good data. - * The READ index is managed by the firmware once the card is enabled. - * - * The WRITE index maps to the last position the driver has read from -- the - * position preceding WRITE is the last slot the firmware can place a packet. - * - * The queue is empty (no good data) if WRITE = READ - 1, and is full if - * WRITE = READ. - * - * During initialization, the host sets up the READ queue position to the first - * INDEX position, and WRITE to the last (READ - 1 wrapped) - * - * When the firmware places a packet in a buffer, it will advance the READ index - * and fire the RX interrupt. The driver can then query the READ index and - * process as many packets as possible, moving the WRITE index forward as it - * resets the Rx queue buffers with new memory. - * - * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) - * + A received packet is processed and handed to the kernel network stack, - * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free - * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ - * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there - * were enough free buffers and RX_STALLED is set it is cleared. 
- * - * - * Driver sequence: - * - * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl3945_rx_queue_restock - * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx - * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl3945_rx_replenish - * - * -- enable interrupts -- - * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the - * READ INDEX, detaching the SKB from the pool. - * Moves the packet buffer from queue to rx_used. - * Calls iwl3945_rx_queue_restock to refill any empty - * slots. - * ... - * - */ - -/** - * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr - */ -static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, - dma_addr_t dma_addr) -{ - return cpu_to_le32((u32)dma_addr); -} - -/** - * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' index forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. - */ -static void iwl3945_rx_queue_restock(struct iwl_priv *priv) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct list_head *element; - struct iwl_rx_mem_buffer *rxb; - unsigned long flags; - int write; - - spin_lock_irqsave(&rxq->lock, flags); - write = rxq->write & ~0x7; - while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { - /* Get next free Rx buffer, remove from free list */ - element = rxq->rx_free.next; - rxb = list_entry(element, struct iwl_rx_mem_buffer, list); - list_del(element); - - /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); - rxq->queue[rxq->write] = rxb; - rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; - rxq->free_count--; - } - spin_unlock_irqrestore(&rxq->lock, flags); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(priv->workqueue, &priv->rx_replenish); - - - /* If we've added more space for the firmware to place data, tell it. - * Increment device's write pointer in multiples of 8. */ - if ((rxq->write_actual != (rxq->write & ~0x7)) - || (abs(rxq->write - rxq->read) > 7)) { - spin_lock_irqsave(&rxq->lock, flags); - rxq->need_update = 1; - spin_unlock_irqrestore(&rxq->lock, flags); - iwl_legacy_rx_queue_update_write_ptr(priv, rxq); - } -} - -/** - * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free - * - * When moving to rx_free an SKB is allocated for the slot. - * - * Also restock the Rx queue via iwl3945_rx_queue_restock. 
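The empty/full convention spelled out in the Rx theory-of-operation comment (the queue is empty when WRITE = READ - 1 and full when WRITE = READ, modulo the ring size) can be captured in two small predicates. The sketch below is illustrative only and assumes a 256-entry ring, consistent with the 8-bit index masking used in the restock path above.

/* Illustrative sketch only: empty/full rule from the comment above,
 * for an assumed 256-entry Rx ring. */
#include <assert.h>

#define RXQ_SIZE 256
#define RXQ_MASK (RXQ_SIZE - 1)

/* full: WRITE has caught up with READ */
static int rxq_is_full(unsigned int read_idx, unsigned int write_idx)
{
	return (write_idx & RXQ_MASK) == (read_idx & RXQ_MASK);
}

/* empty: WRITE == READ - 1 (mod ring size), i.e. no good data yet */
static int rxq_is_empty(unsigned int read_idx, unsigned int write_idx)
{
	return (write_idx & RXQ_MASK) == ((read_idx - 1) & RXQ_MASK);
}

int main(void)
{
	assert(rxq_is_empty(10, 9));
	assert(rxq_is_full(10, 10));
	assert(!rxq_is_empty(10, 10) && !rxq_is_full(10, 9));
	return 0;
}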
- * This is called as a scheduled work item (except for during initialization) - */ -static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct list_head *element; - struct iwl_rx_mem_buffer *rxb; - struct page *page; - unsigned long flags; - gfp_t gfp_mask = priority; - - while (1) { - spin_lock_irqsave(&rxq->lock, flags); - - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - return; - } - spin_unlock_irqrestore(&rxq->lock, flags); - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (priv->hw_params.rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ - break; - } - - spin_lock_irqsave(&rxq->lock, flags); - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - __free_pages(page, priv->hw_params.rx_page_order); - return; - } - element = rxq->rx_used.next; - rxb = list_entry(element, struct iwl_rx_mem_buffer, list); - list_del(element); - spin_unlock_irqrestore(&rxq->lock, flags); - - rxb->page = page; - /* Get physical address of RB/SKB */ - rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - - spin_lock_irqsave(&rxq->lock, flags); - - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - priv->alloc_rxb_page++; - - spin_unlock_irqrestore(&rxq->lock, flags); - } -} - -void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - unsigned long flags; - int i; - spin_lock_irqsave(&rxq->lock, flags); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { - /* In the reset function, these buffers may have been allocated - * to an SKB, so we need to unmap and free potential storage */ - if (rxq->pool[i].page != NULL) { - pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __iwl_legacy_free_pages(priv, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - } - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - spin_unlock_irqrestore(&rxq->lock, flags); -} - -void iwl3945_rx_replenish(void *data) -{ - struct iwl_priv *priv = data; - unsigned long flags; - - iwl3945_rx_allocate(priv, GFP_KERNEL); - - spin_lock_irqsave(&priv->lock, flags); - iwl3945_rx_queue_restock(priv); - spin_unlock_irqrestore(&priv->lock, flags); -} - -static void iwl3945_rx_replenish_now(struct iwl_priv *priv) -{ - iwl3945_rx_allocate(priv, GFP_ATOMIC); - - iwl3945_rx_queue_restock(priv); -} - - -/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL - * This free routine walks the list of POOL entries and if SKB is set to - * non NULL it is unmapped and freed - */ -static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - int i; - for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { - if (rxq->pool[i].page != NULL) { - pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __iwl_legacy_free_pages(priv, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - } - - dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); - dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - rxq->bd = NULL; - rxq->rb_stts = NULL; -} - - -/* Convert linear signal-to-noise ratio into dB */ -static u8 ratio2dB[100] = { -/* 0 1 2 3 4 5 6 7 8 9 */ - 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ - 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ - 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ - 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ - 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ - 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ - 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ - 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ - 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ - 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ -}; - -/* Calculates a relative dB value from a ratio of linear - * (i.e. not dB) signal levels. - * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ -int iwl3945_calc_db_from_ratio(int sig_ratio) -{ - /* 1000:1 or higher just report as 60 dB */ - if (sig_ratio >= 1000) - return 60; - - /* 100:1 or higher, divide by 10 and use table, - * add 20 dB to make up for divide by 10 */ - if (sig_ratio >= 100) - return 20 + (int)ratio2dB[sig_ratio/10]; - - /* We shouldn't see this */ - if (sig_ratio < 1) - return 0; - - /* Use table for ratios 1:1 - 99:1 */ - return (int)ratio2dB[sig_ratio]; -} - -/** - * iwl3945_rx_handle - Main entry function for receiving responses from uCode - * - * Uses the priv->rx_handlers callback function array to invoke - * the appropriate handlers, including command responses, - * frame-received notifications, and other notifications. - */ -static void iwl3945_rx_handle(struct iwl_priv *priv) -{ - struct iwl_rx_mem_buffer *rxb; - struct iwl_rx_packet *pkt; - struct iwl_rx_queue *rxq = &priv->rxq; - u32 r, i; - int reclaim; - unsigned long flags; - u8 fill_rx = 0; - u32 count = 8; - int total_empty = 0; - - /* uCode's read index (stored in shared DRAM) indicates the last Rx - * buffer that the driver may process (last buffer filled by ucode). 
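As a worked example of the ratio-to-dB conversion helper above: a linear signal-to-noise ratio of 250 takes the >= 100 branch, giving 20 + ratio2dB[25] = 20 + 28 = 48 dB, while a ratio of 30 is looked up directly as ratio2dB[30] = 29 dB. The comment's caveat applies: the table assumes the ratio is of voltage levels (20*log), not powers (10*log).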
*/ - r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; - i = rxq->read; - - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - /* Rx interrupt, but nothing sent from uCode */ - if (i == r) - IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); - - while (i != r) { - int len; - - rxb = rxq->queue[i]; - - /* If an RXB doesn't have a Rx queue slot associated with it, - * then a bug has been introduced in the queue refilling - * routines -- catch it here */ - BUG_ON(rxb == NULL); - - rxq->queue[i] = NULL; - - pci_unmap_page(priv->pci_dev, rxb->page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - pkt = rxb_addr(rxb); - - len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - len += sizeof(u32); /* account for status word */ - trace_iwlwifi_legacy_dev_rx(priv, pkt, len); - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. */ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && - (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && - (pkt->hdr.cmd != REPLY_TX); - - /* Based on type of command response or notification, - * handle those that need handling via function in - * rx_handlers table. See iwl3945_setup_rx_handlers() */ - if (priv->rx_handlers[pkt->hdr.cmd]) { - IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, - iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); - priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; - priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); - } else { - /* No handling needed */ - IWL_DEBUG_RX(priv, - "r %d i %d No handler needed for %s, 0x%02x\n", - r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), - pkt->hdr.cmd); - } - - /* - * XXX: After here, we should always check rxb->page - * against NULL before touching it or its virtual - * memory (pkt). Because some rx_handler might have - * already taken or freed the pages. - */ - - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking iwl_legacy_send_cmd() - * as we reclaim the driver command queue */ - if (rxb->page) - iwl_legacy_tx_cmd_complete(priv, rxb); - else - IWL_WARN(priv, "Claim null rxb?\n"); - } - - /* Reuse the page if possible. For notification packets and - * SKBs that fail to Rx correctly, add them back into the - * rx_free list for reuse later. */ - spin_lock_irqsave(&rxq->lock, flags); - if (rxb->page != NULL) { - rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, - 0, PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - } else - list_add_tail(&rxb->list, &rxq->rx_used); - - spin_unlock_irqrestore(&rxq->lock, flags); - - i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode won't assert. 
*/ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - iwl3945_rx_replenish_now(priv); - count = 0; - } - } - } - - /* Backtrack one entry */ - rxq->read = i; - if (fill_rx) - iwl3945_rx_replenish_now(priv); - else - iwl3945_rx_queue_restock(priv); -} - -/* call this function to flush any scheduled tasklet */ -static inline void iwl3945_synchronize_irq(struct iwl_priv *priv) -{ - /* wait to make sure we flush pending tasklet*/ - synchronize_irq(priv->pci_dev->irq); - tasklet_kill(&priv->irq_tasklet); -} - -static const char *iwl3945_desc_lookup(int i) -{ - switch (i) { - case 1: - return "FAIL"; - case 2: - return "BAD_PARAM"; - case 3: - return "BAD_CHECKSUM"; - case 4: - return "NMI_INTERRUPT"; - case 5: - return "SYSASSERT"; - case 6: - return "FATAL_ERROR"; - } - - return "UNKNOWN"; -} - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -void iwl3945_dump_nic_error_log(struct iwl_priv *priv) -{ - u32 i; - u32 desc, time, count, base, data1; - u32 blink1, blink2, ilink1, ilink2; - - base = le32_to_cpu(priv->card_alive.error_event_table_ptr); - - if (!iwl3945_hw_valid_rtc_data_addr(base)) { - IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); - return; - } - - - count = iwl_legacy_read_targ_mem(priv, base); - - if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { - IWL_ERR(priv, "Start IWL Error Log Dump:\n"); - IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", - priv->status, count); - } - - IWL_ERR(priv, "Desc Time asrtPC blink2 " - "ilink1 nmiPC Line\n"); - for (i = ERROR_START_OFFSET; - i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; - i += ERROR_ELEM_SIZE) { - desc = iwl_legacy_read_targ_mem(priv, base + i); - time = - iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32)); - blink1 = - iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32)); - blink2 = - iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32)); - ilink1 = - iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32)); - ilink2 = - iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32)); - data1 = - iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32)); - - IWL_ERR(priv, - "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", - iwl3945_desc_lookup(desc), desc, time, blink1, blink2, - ilink1, ilink2, data1); - trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0, - 0, blink1, blink2, ilink1, ilink2); - } -} - -static void iwl3945_irq_tasklet(struct iwl_priv *priv) -{ - u32 inta, handled = 0; - u32 inta_fh; - unsigned long flags; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - u32 inta_mask; -#endif - - spin_lock_irqsave(&priv->lock, flags); - - /* Ack/clear/reset pending uCode interrupts. - * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, - * and will clear only when CSR_FH_INT_STATUS gets cleared. */ - inta = iwl_read32(priv, CSR_INT); - iwl_write32(priv, CSR_INT, inta); - - /* Ack/clear/reset pending flow-handler (DMA) interrupts. - * Any new interrupts that happen after this, either while we're - * in this tasklet, or later, will show up in next ISR/tasklet. 
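The error-log dump above reads a small table out of device memory: word 0 is the record count, and each record is seven u32s (desc, time, blink1, blink2, ilink1, ilink2, data1) starting one word into the table. A standalone sketch of that layout, assuming the table has already been copied into a host buffer (names here are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

#define ERROR_START_WORDS	1	/* word 0 holds the record count */
#define ERROR_ELEM_WORDS	7	/* desc, time, blink1, blink2, ilink1, ilink2, data1 */

static void dump_error_table(const uint32_t *tbl, size_t nwords)
{
	uint32_t count = tbl[0];
	size_t i;

	for (i = 0; i < count; i++) {
		const uint32_t *rec = tbl + ERROR_START_WORDS + i * ERROR_ELEM_WORDS;

		if (ERROR_START_WORDS + (i + 1) * ERROR_ELEM_WORDS > nwords)
			break;	/* table shorter than advertised */
		printf("desc=0x%X time=%u blink1=0x%X blink2=0x%X ilink1=0x%X ilink2=0x%X data1=%u\n",
		       (unsigned)rec[0], (unsigned)rec[1], (unsigned)rec[2],
		       (unsigned)rec[3], (unsigned)rec[4], (unsigned)rec[5],
		       (unsigned)rec[6]);
	}
}

int main(void)
{
	/* one fabricated record: desc 5 (SYSASSERT in the lookup above) */
	uint32_t tbl[] = { 1, 5, 1234, 0x111, 0x222, 0x333, 0x444, 7 };

	dump_error_table(tbl, sizeof(tbl) / sizeof(tbl[0]));
	return 0;
}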
*/ - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { - /* just for debug */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); - IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", - inta, inta_mask, inta_fh); - } -#endif - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not - * atomic, make sure that inta covers all the interrupts that - * we've discovered, even if FH interrupt came in just after - * reading CSR_INT. */ - if (inta_fh & CSR39_FH_INT_RX_MASK) - inta |= CSR_INT_BIT_FH_RX; - if (inta_fh & CSR39_FH_INT_TX_MASK) - inta |= CSR_INT_BIT_FH_TX; - - /* Now service all interrupt bits discovered above. */ - if (inta & CSR_INT_BIT_HW_ERR) { - IWL_ERR(priv, "Hardware error detected. Restarting.\n"); - - /* Tell the device to stop sending interrupts */ - iwl_legacy_disable_interrupts(priv); - - priv->isr_stats.hw++; - iwl_legacy_irq_handle_error(priv); - - handled |= CSR_INT_BIT_HW_ERR; - - return; - } - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " - "the frame/frames.\n"); - priv->isr_stats.sch++; - } - - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - IWL_DEBUG_ISR(priv, "Alive interrupt\n"); - priv->isr_stats.alive++; - } - } -#endif - /* Safely ignore these bits for debug checks below */ - inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); - - /* Error detected by uCode */ - if (inta & CSR_INT_BIT_SW_ERR) { - IWL_ERR(priv, "Microcode SW error detected. 
" - "Restarting 0x%X.\n", inta); - priv->isr_stats.sw++; - iwl_legacy_irq_handle_error(priv); - handled |= CSR_INT_BIT_SW_ERR; - } - - /* uCode wakes up after power-down sleep */ - if (inta & CSR_INT_BIT_WAKEUP) { - IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); - iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]); - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]); - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]); - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]); - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]); - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]); - - priv->isr_stats.wakeup++; - handled |= CSR_INT_BIT_WAKEUP; - } - - /* All uCode command responses, including Tx command responses, - * Rx "responses" (frame-received notification), and other - * notifications from uCode come through here*/ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { - iwl3945_rx_handle(priv); - priv->isr_stats.rx++; - handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); - } - - if (inta & CSR_INT_BIT_FH_TX) { - IWL_DEBUG_ISR(priv, "Tx interrupt\n"); - priv->isr_stats.tx++; - - iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); - iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT - (FH39_SRVC_CHNL), 0x0); - handled |= CSR_INT_BIT_FH_TX; - } - - if (inta & ~handled) { - IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); - priv->isr_stats.unhandled++; - } - - if (inta & ~priv->inta_mask) { - IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", - inta & ~priv->inta_mask); - IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); - } - - /* Re-enable all interrupts */ - /* only Re-enable if disabled by irq */ - if (test_bit(STATUS_INT_ENABLED, &priv->status)) - iwl_legacy_enable_interrupts(priv); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { - inta = iwl_read32(priv, CSR_INT); - inta_mask = iwl_read32(priv, CSR_INT_MASK); - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " - "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); - } -#endif -} - -static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, - enum ieee80211_band band, - u8 is_active, u8 n_probes, - struct iwl3945_scan_channel *scan_ch, - struct ieee80211_vif *vif) -{ - struct ieee80211_channel *chan; - const struct ieee80211_supported_band *sband; - const struct iwl_channel_info *ch_info; - u16 passive_dwell = 0; - u16 active_dwell = 0; - int added, i; - - sband = iwl_get_hw_mode(priv, band); - if (!sband) - return 0; - - active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); - passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); - - if (passive_dwell <= active_dwell) - passive_dwell = active_dwell + 1; - - for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { - chan = priv->scan_request->channels[i]; - - if (chan->band != band) - continue; - - scan_ch->channel = chan->hw_value; - - ch_info = iwl_legacy_get_channel_info(priv, band, - scan_ch->channel); - if (!iwl_legacy_is_channel_valid(ch_info)) { - IWL_DEBUG_SCAN(priv, - "Channel %d is INVALID for this band.\n", - scan_ch->channel); - continue; - } - - scan_ch->active_dwell = cpu_to_le16(active_dwell); - scan_ch->passive_dwell = cpu_to_le16(passive_dwell); - /* If passive , set up for auto-switch - * and use long active_dwell time. 
- */ - if (!is_active || iwl_legacy_is_channel_passive(ch_info) || - (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { - scan_ch->type = 0; /* passive */ - if (IWL_UCODE_API(priv->ucode_ver) == 1) - scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); - } else { - scan_ch->type = 1; /* active */ - } - - /* Set direct probe bits. These may be used both for active - * scan channels (probes gets sent right away), - * or for passive channels (probes get se sent only after - * hearing clear Rx packet).*/ - if (IWL_UCODE_API(priv->ucode_ver) >= 2) { - if (n_probes) - scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); - } else { - /* uCode v1 does not allow setting direct probe bits on - * passive channel. */ - if ((scan_ch->type & 1) && n_probes) - scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); - } - - /* Set txpower levels to defaults */ - scan_ch->tpc.dsp_atten = 110; - /* scan_pwr_info->tpc.dsp_atten; */ - - /*scan_pwr_info->tpc.tx_gain; */ - if (band == IEEE80211_BAND_5GHZ) - scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; - else { - scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); - /* NOTE: if we were doing 6Mb OFDM for scans we'd use - * power level: - * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; - */ - } - - IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", - scan_ch->channel, - (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", - (scan_ch->type & 1) ? - active_dwell : passive_dwell); - - scan_ch++; - added++; - } - - IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); - return added; -} - -static void iwl3945_init_hw_rates(struct iwl_priv *priv, - struct ieee80211_rate *rates) -{ - int i; - - for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { - rates[i].bitrate = iwl3945_rates[i].ieee * 5; - rates[i].hw_value = i; /* Rate scaling will work on indexes */ - rates[i].hw_value_short = i; - rates[i].flags = 0; - if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { - /* - * If CCK != 1M then set short preamble rate flag. - */ - rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? - 0 : IEEE80211_RATE_SHORT_PREAMBLE; - } - } -} - -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) -{ - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); -} - -/** - * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, - * looking at all data. 
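In iwl3945_init_hw_rates() above, the multiply-by-5 converts the table's IEEE rate values (the usual 500 kb/s supported-rates encoding) into mac80211 bitrate units of 100 kb/s, and only CCK rates other than 1 Mb/s get the short-preamble flag. A trivial sketch of the unit conversion (the example rate values are illustrative):

#include <stdio.h>

/* IEEE supported-rates values are in 500 kb/s units; a bitrate field in
 * 100 kb/s units is therefore ieee * 5. */
static const int example_ieee_rates[] = { 2, 4, 11, 22, 12, 108 };

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(example_ieee_rates) / sizeof(example_ieee_rates[0]); i++) {
		int bitrate = example_ieee_rates[i] * 5;

		printf("ieee %3d -> %2d.%d Mb/s\n",
		       example_ieee_rates[i], bitrate / 10, bitrate % 10);
	}
	return 0;
}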
- */ -static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) -{ - u32 val; - u32 save_len = len; - int rc = 0; - u32 errcnt; - - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - - iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, - IWL39_RTC_INST_LOWER_BOUND); - - errcnt = 0; - for (; len > 0; len -= sizeof(u32), image++) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - save_len - len, val, le32_to_cpu(*image)); - rc = -EIO; - errcnt++; - if (errcnt >= 20) - break; - } - } - - - if (!errcnt) - IWL_DEBUG_INFO(priv, - "ucode image in INSTRUCTION memory is good\n"); - - return rc; -} - - -/** - * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, - * using sample data 100 bytes apart. If these sample points are good, - * it's a pretty good bet that everything between them is good, too. - */ -static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) -{ - u32 val; - int rc = 0; - u32 errcnt = 0; - u32 i; - - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - - for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, - i + IWL39_RTC_INST_LOWER_BOUND); - val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { -#if 0 /* Enable this if you want to see details */ - IWL_ERR(priv, "uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - i, val, *image); -#endif - rc = -EIO; - errcnt++; - if (errcnt >= 3) - break; - } - } - - return rc; -} - - -/** - * iwl3945_verify_ucode - determine which instruction image is in SRAM, - * and verify its contents - */ -static int iwl3945_verify_ucode(struct iwl_priv *priv) -{ - __le32 *image; - u32 len; - int rc = 0; - - /* Try bootstrap */ - image = (__le32 *)priv->ucode_boot.v_addr; - len = priv->ucode_boot.len; - rc = iwl3945_verify_inst_sparse(priv, image, len); - if (rc == 0) { - IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); - return 0; - } - - /* Try initialize */ - image = (__le32 *)priv->ucode_init.v_addr; - len = priv->ucode_init.len; - rc = iwl3945_verify_inst_sparse(priv, image, len); - if (rc == 0) { - IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); - return 0; - } - - /* Try runtime/protocol */ - image = (__le32 *)priv->ucode_code.v_addr; - len = priv->ucode_code.len; - rc = iwl3945_verify_inst_sparse(priv, image, len); - if (rc == 0) { - IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); - return 0; - } - - IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); - - /* Since nothing seems to match, show first several data entries in - * instruction SRAM, so maybe visual inspection will give a clue. - * Selection of bootstrap image (vs. other images) is arbitrary. 
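iwl3945_verify_inst_sparse() above trades completeness for speed: it compares one 32-bit word every 100 bytes and gives up after a handful of mismatches, on the theory that if the sampled points match, the data in between almost certainly does too. A minimal sketch of that sampling strategy over two plain host buffers (hypothetical helper, not the driver's register-based read path):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int verify_sparse(const uint8_t *host, const uint8_t *device, size_t len)
{
	size_t i;
	int errcnt = 0;

	/* one u32 sample every 100 bytes, stop after 3 mismatches */
	for (i = 0; i + sizeof(uint32_t) <= len; i += 100) {
		uint32_t h, d;

		memcpy(&h, host + i, sizeof(h));
		memcpy(&d, device + i, sizeof(d));
		if (h != d) {
			printf("mismatch at offset %zu: 0x%08x != 0x%08x\n",
			       i, (unsigned)h, (unsigned)d);
			if (++errcnt >= 3)
				break;
		}
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint8_t a[1000], b[1000];

	memset(a, 0xAB, sizeof(a));
	memcpy(b, a, sizeof(b));
	b[300] ^= 0xFF;		/* corrupt one sampled word */
	return verify_sparse(a, b, sizeof(a)) ? 1 : 0;
}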
*/ - image = (__le32 *)priv->ucode_boot.v_addr; - len = priv->ucode_boot.len; - rc = iwl3945_verify_inst_full(priv, image, len); - - return rc; -} - -static void iwl3945_nic_start(struct iwl_priv *priv) -{ - /* Remove all resets to allow NIC to operate */ - iwl_write32(priv, CSR_RESET, 0); -} - -#define IWL3945_UCODE_GET(item) \ -static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ -{ \ - return le32_to_cpu(ucode->v1.item); \ -} - -static u32 iwl3945_ucode_get_header_size(u32 api_ver) -{ - return 24; -} - -static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) -{ - return (u8 *) ucode->v1.data; -} - -IWL3945_UCODE_GET(inst_size); -IWL3945_UCODE_GET(data_size); -IWL3945_UCODE_GET(init_size); -IWL3945_UCODE_GET(init_data_size); -IWL3945_UCODE_GET(boot_size); - -/** - * iwl3945_read_ucode - Read uCode images from disk file. - * - * Copy into buffers for card to fetch via bus-mastering - */ -static int iwl3945_read_ucode(struct iwl_priv *priv) -{ - const struct iwl_ucode_header *ucode; - int ret = -EINVAL, index; - const struct firmware *ucode_raw; - /* firmware file name contains uCode/driver compatibility version */ - const char *name_pre = priv->cfg->fw_name_pre; - const unsigned int api_max = priv->cfg->ucode_api_max; - const unsigned int api_min = priv->cfg->ucode_api_min; - char buf[25]; - u8 *src; - size_t len; - u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; - - /* Ask kernel firmware_class module to get the boot firmware off disk. - * request_firmware() is synchronous, file is in memory on return. */ - for (index = api_max; index >= api_min; index--) { - sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); - ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); - if (ret < 0) { - IWL_ERR(priv, "%s firmware file req failed: %d\n", - buf, ret); - if (ret == -ENOENT) - continue; - else - goto error; - } else { - if (index < api_max) - IWL_ERR(priv, "Loaded firmware %s, " - "which is deprecated. " - " Please use API v%u instead.\n", - buf, api_max); - IWL_DEBUG_INFO(priv, "Got firmware '%s' file " - "(%zd bytes) from disk\n", - buf, ucode_raw->size); - break; - } - } - - if (ret < 0) - goto error; - - /* Make sure that we got at least our header! */ - if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) { - IWL_ERR(priv, "File size way too small!\n"); - ret = -EINVAL; - goto err_release; - } - - /* Data from ucode file: header followed by uCode images */ - ucode = (struct iwl_ucode_header *)ucode_raw->data; - - priv->ucode_ver = le32_to_cpu(ucode->ver); - api_ver = IWL_UCODE_API(priv->ucode_ver); - inst_size = iwl3945_ucode_get_inst_size(ucode); - data_size = iwl3945_ucode_get_data_size(ucode); - init_size = iwl3945_ucode_get_init_size(ucode); - init_data_size = iwl3945_ucode_get_init_data_size(ucode); - boot_size = iwl3945_ucode_get_boot_size(ucode); - src = iwl3945_ucode_get_data(ucode); - - /* api_ver should match the api version forming part of the - * firmware filename ... but we don't check for that and only rely - * on the API version read from firmware header from here on forward */ - - if (api_ver < api_min || api_ver > api_max) { - IWL_ERR(priv, "Driver unable to support your firmware API. " - "Driver supports v%u, firmware is v%u.\n", - api_max, api_ver); - priv->ucode_ver = 0; - ret = -EINVAL; - goto err_release; - } - if (api_ver != api_max) - IWL_ERR(priv, "Firmware has old API version. Expected %u, " - "got %u. 
New firmware can be obtained " - "from http://www.intellinuxwireless.org.\n", - api_max, api_ver); - - IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", - IWL_UCODE_MAJOR(priv->ucode_ver), - IWL_UCODE_MINOR(priv->ucode_ver), - IWL_UCODE_API(priv->ucode_ver), - IWL_UCODE_SERIAL(priv->ucode_ver)); - - snprintf(priv->hw->wiphy->fw_version, - sizeof(priv->hw->wiphy->fw_version), - "%u.%u.%u.%u", - IWL_UCODE_MAJOR(priv->ucode_ver), - IWL_UCODE_MINOR(priv->ucode_ver), - IWL_UCODE_API(priv->ucode_ver), - IWL_UCODE_SERIAL(priv->ucode_ver)); - - IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", - priv->ucode_ver); - IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", - inst_size); - IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", - data_size); - IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", - init_size); - IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", - init_data_size); - IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", - boot_size); - - - /* Verify size of file vs. image size info in file's header */ - if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) + - inst_size + data_size + init_size + - init_data_size + boot_size) { - - IWL_DEBUG_INFO(priv, - "uCode file size %zd does not match expected size\n", - ucode_raw->size); - ret = -EINVAL; - goto err_release; - } - - /* Verify that uCode images will fit in card's SRAM */ - if (inst_size > IWL39_MAX_INST_SIZE) { - IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", - inst_size); - ret = -EINVAL; - goto err_release; - } - - if (data_size > IWL39_MAX_DATA_SIZE) { - IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", - data_size); - ret = -EINVAL; - goto err_release; - } - if (init_size > IWL39_MAX_INST_SIZE) { - IWL_DEBUG_INFO(priv, - "uCode init instr len %d too large to fit in\n", - init_size); - ret = -EINVAL; - goto err_release; - } - if (init_data_size > IWL39_MAX_DATA_SIZE) { - IWL_DEBUG_INFO(priv, - "uCode init data len %d too large to fit in\n", - init_data_size); - ret = -EINVAL; - goto err_release; - } - if (boot_size > IWL39_MAX_BSM_SIZE) { - IWL_DEBUG_INFO(priv, - "uCode boot instr len %d too large to fit in\n", - boot_size); - ret = -EINVAL; - goto err_release; - } - - /* Allocate ucode buffers for card's bus-master loading ... 
*/ - - /* Runtime instructions and 2 copies of data: - * 1) unmodified from disk - * 2) backup cache for save/restore during power-downs */ - priv->ucode_code.len = inst_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); - - priv->ucode_data.len = data_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); - - priv->ucode_data_backup.len = data_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); - - if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || - !priv->ucode_data_backup.v_addr) - goto err_pci_alloc; - - /* Initialization instructions and data */ - if (init_size && init_data_size) { - priv->ucode_init.len = init_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); - - priv->ucode_init_data.len = init_data_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); - - if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) - goto err_pci_alloc; - } - - /* Bootstrap (instructions only, no data) */ - if (boot_size) { - priv->ucode_boot.len = boot_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); - - if (!priv->ucode_boot.v_addr) - goto err_pci_alloc; - } - - /* Copy images into buffers for card's bus-master reads ... */ - - /* Runtime instructions (first block of data in file) */ - len = inst_size; - IWL_DEBUG_INFO(priv, - "Copying (but not loading) uCode instr len %zd\n", len); - memcpy(priv->ucode_code.v_addr, src, len); - src += len; - - IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", - priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); - - /* Runtime data (2nd block) - * NOTE: Copy into backup buffer will be done in iwl3945_up() */ - len = data_size; - IWL_DEBUG_INFO(priv, - "Copying (but not loading) uCode data len %zd\n", len); - memcpy(priv->ucode_data.v_addr, src, len); - memcpy(priv->ucode_data_backup.v_addr, src, len); - src += len; - - /* Initialization instructions (3rd block) */ - if (init_size) { - len = init_size; - IWL_DEBUG_INFO(priv, - "Copying (but not loading) init instr len %zd\n", len); - memcpy(priv->ucode_init.v_addr, src, len); - src += len; - } - - /* Initialization data (4th block) */ - if (init_data_size) { - len = init_data_size; - IWL_DEBUG_INFO(priv, - "Copying (but not loading) init data len %zd\n", len); - memcpy(priv->ucode_init_data.v_addr, src, len); - src += len; - } - - /* Bootstrap instructions (5th block) */ - len = boot_size; - IWL_DEBUG_INFO(priv, - "Copying (but not loading) boot instr len %zd\n", len); - memcpy(priv->ucode_boot.v_addr, src, len); - - /* We have our copies now, allow OS release its copies */ - release_firmware(ucode_raw); - return 0; - - err_pci_alloc: - IWL_ERR(priv, "failed to allocate pci memory\n"); - ret = -ENOMEM; - iwl3945_dealloc_ucode_pci(priv); - - err_release: - release_firmware(ucode_raw); - - error: - return ret; -} - - -/** - * iwl3945_set_ucode_ptrs - Set uCode address location - * - * Tell initialization uCode where to find runtime uCode. - * - * BSM registers initially contain pointers to initialization uCode. - * We need to replace them to load runtime uCode inst and data, - * and to save runtime data when powering down. 
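The firmware file parsed above is, for API v1, a 24-byte header followed by five image sections in a fixed order: runtime instructions, runtime data, init instructions, init data, bootstrap. The header carries the version word plus the five section sizes, which is exactly what the check against ucode_raw->size uses. A standalone sketch of that check (struct layout inferred from the 24-byte header and the v1 getters above; the real driver also byte-swaps the little-endian fields):

#include <stdint.h>
#include <stdio.h>

/* inferred v1 layout: six little-endian u32s = 24 bytes */
struct ucode_header_v1 {
	uint32_t ver;
	uint32_t inst_size;
	uint32_t data_size;
	uint32_t init_size;
	uint32_t init_data_size;
	uint32_t boot_size;
};

static int check_ucode_size(const struct ucode_header_v1 *hdr, size_t file_size)
{
	uint64_t expected = sizeof(*hdr) + hdr->inst_size + hdr->data_size +
			    hdr->init_size + hdr->init_data_size + hdr->boot_size;

	if (expected != file_size) {
		fprintf(stderr, "file size %zu does not match expected %llu\n",
			file_size, (unsigned long long)expected);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ucode_header_v1 hdr = {
		.ver = 1, .inst_size = 1024, .data_size = 512,
		.init_size = 256, .init_data_size = 128, .boot_size = 64,
	};

	return check_ucode_size(&hdr, sizeof(hdr) + 1024 + 512 + 256 + 128 + 64) ? 1 : 0;
}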
- */ -static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) -{ - dma_addr_t pinst; - dma_addr_t pdata; - - /* bits 31:0 for 3945 */ - pinst = priv->ucode_code.p_addr; - pdata = priv->ucode_data_backup.p_addr; - - /* Tell bootstrap uCode where to find image to load */ - iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, - priv->ucode_data.len); - - /* Inst byte count must be last to set up, bit 31 signals uCode - * that all new ptr/size info is in place */ - iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, - priv->ucode_code.len | BSM_DRAM_INST_LOAD); - - IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); - - return 0; -} - -/** - * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received - * - * Called after REPLY_ALIVE notification received from "initialize" uCode. - * - * Tell "initialize" uCode to go ahead and load the runtime uCode. - */ -static void iwl3945_init_alive_start(struct iwl_priv *priv) -{ - /* Check alive response for "valid" sign from uCode */ - if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); - goto restart; - } - - /* Bootstrap uCode has loaded initialize uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "initialize" alive if code weren't properly loaded. */ - if (iwl3945_verify_ucode(priv)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); - goto restart; - } - - /* Send pointers to protocol/runtime uCode image ... init code will - * load and launch runtime uCode, which will send us another "Alive" - * notification. */ - IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); - if (iwl3945_set_ucode_ptrs(priv)) { - /* Runtime instruction load won't happen; - * take it all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); - goto restart; - } - return; - - restart: - queue_work(priv->workqueue, &priv->restart); -} - -/** - * iwl3945_alive_start - called after REPLY_ALIVE notification received - * from protocol/runtime uCode (initialization uCode's - * Alive gets handled by iwl3945_init_alive_start()). - */ -static void iwl3945_alive_start(struct iwl_priv *priv) -{ - int thermal_spin = 0; - u32 rfkill; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - - if (priv->card_alive.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Alive failed.\n"); - goto restart; - } - - /* Initialize uCode has loaded Runtime uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "runtime" alive if code weren't properly loaded. 
*/ - if (iwl3945_verify_ucode(priv)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); - goto restart; - } - - rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG); - IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); - - if (rfkill & 0x1) { - clear_bit(STATUS_RF_KILL_HW, &priv->status); - /* if RFKILL is not on, then wait for thermal - * sensor in adapter to kick in */ - while (iwl3945_hw_get_temperature(priv) == 0) { - thermal_spin++; - udelay(10); - } - - if (thermal_spin) - IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", - thermal_spin * 10); - } else - set_bit(STATUS_RF_KILL_HW, &priv->status); - - /* After the ALIVE response, we can send commands to 3945 uCode */ - set_bit(STATUS_ALIVE, &priv->status); - - /* Enable watchdog to monitor the driver tx queues */ - iwl_legacy_setup_watchdog(priv); - - if (iwl_legacy_is_rfkill(priv)) - return; - - ieee80211_wake_queues(priv->hw); - - priv->active_rate = IWL_RATES_MASK_3945; - - iwl_legacy_power_update_mode(priv, true); - - if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { - struct iwl3945_rxon_cmd *active_rxon = - (struct iwl3945_rxon_cmd *)(&ctx->active); - - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - } else { - /* Initialize our rx_config data */ - iwl_legacy_connection_init_rx_config(priv, ctx); - } - - /* Configure Bluetooth device coexistence support */ - iwl_legacy_send_bt_config(priv); - - set_bit(STATUS_READY, &priv->status); - - /* Configure the adapter for unassociated operation */ - iwl3945_commit_rxon(priv, ctx); - - iwl3945_reg_txpower_periodic(priv); - - IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); - wake_up(&priv->wait_command_queue); - - return; - - restart: - queue_work(priv->workqueue, &priv->restart); -} - -static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); - -static void __iwl3945_down(struct iwl_priv *priv) -{ - unsigned long flags; - int exit_pending; - - IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); - - iwl_legacy_scan_cancel_timeout(priv, 200); - - exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); - - /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set - * to prevent rearm timer */ - del_timer_sync(&priv->watchdog); - - /* Station information will now be cleared in device */ - iwl_legacy_clear_ucode_stations(priv, NULL); - iwl_legacy_dealloc_bcast_stations(priv); - iwl_legacy_clear_driver_stations(priv); - - /* Unblock any waiting calls */ - wake_up_all(&priv->wait_command_queue); - - /* Wipe out the EXIT_PENDING status bit if we are not actually - * exiting the module */ - if (!exit_pending) - clear_bit(STATUS_EXIT_PENDING, &priv->status); - - /* stop and reset the on-board processor */ - iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /* tell the device to stop sending interrupts */ - spin_lock_irqsave(&priv->lock, flags); - iwl_legacy_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - iwl3945_synchronize_irq(priv); - - if (priv->mac80211_registered) - ieee80211_stop_queues(priv->hw); - - /* If we have not previously called iwl3945_init() then - * clear all bits but the RF Kill bits and return */ - if (!iwl_legacy_is_init(priv)) { - priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << - STATUS_RF_KILL_HW | - test_bit(STATUS_GEO_CONFIGURED, &priv->status) << - STATUS_GEO_CONFIGURED | - test_bit(STATUS_EXIT_PENDING, &priv->status) << - STATUS_EXIT_PENDING; - goto exit; - } - - /* ...otherwise clear out all the status bits but the RF Kill - * bit and continue taking the NIC down. */ - priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << - STATUS_RF_KILL_HW | - test_bit(STATUS_GEO_CONFIGURED, &priv->status) << - STATUS_GEO_CONFIGURED | - test_bit(STATUS_FW_ERROR, &priv->status) << - STATUS_FW_ERROR | - test_bit(STATUS_EXIT_PENDING, &priv->status) << - STATUS_EXIT_PENDING; - - iwl3945_hw_txq_ctx_stop(priv); - iwl3945_hw_rxq_stop(priv); - - /* Power-down device's busmaster DMA clocks */ - iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); - - /* Stop the device, and put it in low power state */ - iwl_legacy_apm_stop(priv); - - exit: - memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); - - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - priv->beacon_skb = NULL; - - /* clear out any free frames */ - iwl3945_clear_free_frames(priv); -} - -static void iwl3945_down(struct iwl_priv *priv) -{ - mutex_lock(&priv->mutex); - __iwl3945_down(priv); - mutex_unlock(&priv->mutex); - - iwl3945_cancel_deferred_work(priv); -} - -#define MAX_HW_RESTARTS 5 - -static int iwl3945_alloc_bcast_station(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - unsigned long flags; - u8 sta_id; - - spin_lock_irqsave(&priv->sta_lock, flags); - sta_id = iwl_legacy_prep_station(priv, ctx, - iwlegacy_bcast_addr, false, NULL); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Unable to prepare broadcast station\n"); - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return -EINVAL; - } - - priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; - priv->stations[sta_id].used |= IWL_STA_BCAST; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return 0; -} - -static int __iwl3945_up(struct iwl_priv *priv) -{ - int rc, i; - - rc = iwl3945_alloc_bcast_station(priv); - if (rc) - return rc; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { - IWL_ERR(priv, "ucode not available for device bring up\n"); - return -EIO; - } - 
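The teardown path in __iwl3945_down() above clears priv->status by rebuilding it from a short whitelist of bits (the RF-kill, geo-configured, firmware-error and exit-pending flags) so that everything else is dropped across the shutdown. The long test_bit()/shift expressions amount to "AND with a mask of the bits worth keeping"; a tiny standalone illustration of the same pattern (bit positions and the keep set here are made up, not the driver's):

#include <stdio.h>

enum {
	STATUS_RF_KILL_HW = 0,	/* illustrative bit positions only */
	STATUS_ALIVE,
	STATUS_READY,
	STATUS_GEO_CONFIGURED,
	STATUS_EXIT_PENDING,
};

#define BIT(n)	(1UL << (n))

int main(void)
{
	unsigned long status = BIT(STATUS_RF_KILL_HW) | BIT(STATUS_ALIVE) |
			       BIT(STATUS_READY) | BIT(STATUS_GEO_CONFIGURED);
	/* only these survive the shutdown path */
	unsigned long keep = BIT(STATUS_RF_KILL_HW) | BIT(STATUS_GEO_CONFIGURED) |
			     BIT(STATUS_EXIT_PENDING);

	printf("before: 0x%02lx\n", status);
	status &= keep;		/* ALIVE and READY are cleared */
	printf("after : 0x%02lx\n", status);
	return 0;
}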
- /* If platform's RF_KILL switch is NOT set to KILL */ - if (iwl_read32(priv, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(STATUS_RF_KILL_HW, &priv->status); - else { - set_bit(STATUS_RF_KILL_HW, &priv->status); - IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); - return -ENODEV; - } - - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - - rc = iwl3945_hw_nic_init(priv); - if (rc) { - IWL_ERR(priv, "Unable to int nic\n"); - return rc; - } - - /* make sure rfkill handshake bits are cleared */ - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - /* clear (again), then enable host interrupts */ - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_legacy_enable_interrupts(priv); - - /* really make sure rfkill handshake bits are cleared */ - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - - /* Copy original ucode data image from disk into backup cache. - * This will be used to initialize the on-board processor's - * data SRAM for a clean start when the runtime program first loads. */ - memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, - priv->ucode_data.len); - - /* We return success when we resume from suspend and rf_kill is on. */ - if (test_bit(STATUS_RF_KILL_HW, &priv->status)) - return 0; - - for (i = 0; i < MAX_HW_RESTARTS; i++) { - - /* load bootstrap state machine, - * load bootstrap program into processor's memory, - * prepare to load the "initialize" uCode */ - rc = priv->cfg->ops->lib->load_ucode(priv); - - if (rc) { - IWL_ERR(priv, - "Unable to set up bootstrap uCode: %d\n", rc); - continue; - } - - /* start card; "initialize" will load runtime ucode */ - iwl3945_nic_start(priv); - - IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); - - return 0; - } - - set_bit(STATUS_EXIT_PENDING, &priv->status); - __iwl3945_down(priv); - clear_bit(STATUS_EXIT_PENDING, &priv->status); - - /* tried to restart and config the device for as long as our - * patience could withstand */ - IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); - return -EIO; -} - - -/***************************************************************************** - * - * Workqueue callbacks - * - *****************************************************************************/ - -static void iwl3945_bg_init_alive_start(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, init_alive_start.work); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - iwl3945_init_alive_start(priv); -out: - mutex_unlock(&priv->mutex); -} - -static void iwl3945_bg_alive_start(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, alive_start.work); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - iwl3945_alive_start(priv); -out: - mutex_unlock(&priv->mutex); -} - -/* - * 3945 cannot interrupt driver when hardware rf kill switch toggles; - * driver must poll CSR_GP_CNTRL_REG register for change. This register - * *is* readable even when device has been SW_RESET into low power mode - * (e.g. during RF KILL). 
- */ -static void iwl3945_rfkill_poll(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, _3945.rfkill_poll.work); - bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); - bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) - & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); - - if (new_rfkill != old_rfkill) { - if (new_rfkill) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); - - IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", - new_rfkill ? "disable radio" : "enable radio"); - } - - /* Keep this running, even if radio now enabled. This will be - * cancelled in mac_start() if system decides to start again */ - queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, - round_jiffies_relative(2 * HZ)); - -} - -int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_SCAN_CMD, - .len = sizeof(struct iwl3945_scan_cmd), - .flags = CMD_SIZE_HUGE, - }; - struct iwl3945_scan_cmd *scan; - u8 n_probes = 0; - enum ieee80211_band band; - bool is_active = false; - int ret; - u16 len; - - lockdep_assert_held(&priv->mutex); - - if (!priv->scan_cmd) { - priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + - IWL_MAX_SCAN_SIZE, GFP_KERNEL); - if (!priv->scan_cmd) { - IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); - return -ENOMEM; - } - } - scan = priv->scan_cmd; - memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); - - scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; - scan->quiet_time = IWL_ACTIVE_QUIET_TIME; - - if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { - u16 interval; - u32 extra; - u32 suspend_time = 100; - u32 scan_suspend_time = 100; - - IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); - - interval = vif->bss_conf.beacon_int; - - scan->suspend_time = 0; - scan->max_out_time = cpu_to_le32(200 * 1024); - if (!interval) - interval = suspend_time; - /* - * suspend time format: - * 0-19: beacon interval in usec (time before exec.) 
- * 20-23: 0 - * 24-31: number of beacons (suspend between channels) - */ - - extra = (suspend_time / interval) << 24; - scan_suspend_time = 0xFF0FFFFF & - (extra | ((suspend_time % interval) * 1024)); - - scan->suspend_time = cpu_to_le32(scan_suspend_time); - IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", - scan_suspend_time, interval); - } - - if (priv->scan_request->n_ssids) { - int i, p = 0; - IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); - for (i = 0; i < priv->scan_request->n_ssids; i++) { - /* always does wildcard anyway */ - if (!priv->scan_request->ssids[i].ssid_len) - continue; - scan->direct_scan[p].id = WLAN_EID_SSID; - scan->direct_scan[p].len = - priv->scan_request->ssids[i].ssid_len; - memcpy(scan->direct_scan[p].ssid, - priv->scan_request->ssids[i].ssid, - priv->scan_request->ssids[i].ssid_len); - n_probes++; - p++; - } - is_active = true; - } else - IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); - - /* We don't build a direct scan probe request; the uCode will do - * that based on the direct_mask added to each channel entry */ - scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; - scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; - scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - /* flags + rate selection */ - - switch (priv->scan_band) { - case IEEE80211_BAND_2GHZ: - scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; - scan->tx_cmd.rate = IWL_RATE_1M_PLCP; - band = IEEE80211_BAND_2GHZ; - break; - case IEEE80211_BAND_5GHZ: - scan->tx_cmd.rate = IWL_RATE_6M_PLCP; - band = IEEE80211_BAND_5GHZ; - break; - default: - IWL_WARN(priv, "Invalid scan band\n"); - return -EIO; - } - - /* - * If active scaning is requested but a certain channel - * is marked passive, we can do active scanning if we - * detect transmissions. - */ - scan->good_CRC_th = is_active ? 
IWL_GOOD_CRC_TH_DEFAULT : - IWL_GOOD_CRC_TH_DISABLED; - - len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, - vif->addr, priv->scan_request->ie, - priv->scan_request->ie_len, - IWL_MAX_SCAN_SIZE - sizeof(*scan)); - scan->tx_cmd.len = cpu_to_le16(len); - - /* select Rx antennas */ - scan->flags |= iwl3945_get_antenna_flags(priv); - - scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, - (void *)&scan->data[len], vif); - if (scan->channel_count == 0) { - IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); - return -EIO; - } - - cmd.len += le16_to_cpu(scan->tx_cmd.len) + - scan->channel_count * sizeof(struct iwl3945_scan_channel); - cmd.data = scan; - scan->len = cpu_to_le16(cmd.len); - - set_bit(STATUS_SCAN_HW, &priv->status); - ret = iwl_legacy_send_cmd_sync(priv, &cmd); - if (ret) - clear_bit(STATUS_SCAN_HW, &priv->status); - return ret; -} - -void iwl3945_post_scan(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - /* - * Since setting the RXON may have been deferred while - * performing the scan, fire one off if needed - */ - if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - iwl3945_commit_rxon(priv, ctx); -} - -static void iwl3945_bg_restart(struct work_struct *data) -{ - struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { - struct iwl_rxon_context *ctx; - mutex_lock(&priv->mutex); - for_each_context(priv, ctx) - ctx->vif = NULL; - priv->is_open = 0; - mutex_unlock(&priv->mutex); - iwl3945_down(priv); - ieee80211_restart_hw(priv->hw); - } else { - iwl3945_down(priv); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - mutex_unlock(&priv->mutex); - return; - } - - __iwl3945_up(priv); - mutex_unlock(&priv->mutex); - } -} - -static void iwl3945_bg_rx_replenish(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, rx_replenish); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - iwl3945_rx_replenish(priv); -out: - mutex_unlock(&priv->mutex); -} - -void iwl3945_post_associate(struct iwl_priv *priv) -{ - int rc = 0; - struct ieee80211_conf *conf = NULL; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (!ctx->vif || !priv->is_open) - return; - - IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", - ctx->vif->bss_conf.aid, ctx->active.bssid_addr); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - iwl_legacy_scan_cancel_timeout(priv, 200); - - conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); - - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl3945_commit_rxon(priv, ctx); - - rc = iwl_legacy_send_rxon_timing(priv, ctx); - if (rc) - IWL_WARN(priv, "REPLY_RXON_TIMING failed - " - "Attempting to continue.\n"); - - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - - ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid); - - IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", - ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int); - - if (ctx->vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (ctx->vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - 
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - } - - iwl3945_commit_rxon(priv, ctx); - - switch (ctx->vif->type) { - case NL80211_IFTYPE_STATION: - iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); - break; - case NL80211_IFTYPE_ADHOC: - iwl3945_send_beacon_cmd(priv); - break; - default: - IWL_ERR(priv, "%s Should not be called in %d mode\n", - __func__, ctx->vif->type); - break; - } -} - -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - -#define UCODE_READY_TIMEOUT (2 * HZ) - -static int iwl3945_mac_start(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&priv->mutex); - - /* fetch ucode file from disk, alloc and copy to bus-master buffers ... - * ucode filename and max sizes are card-specific. */ - - if (!priv->ucode_code.len) { - ret = iwl3945_read_ucode(priv); - if (ret) { - IWL_ERR(priv, "Could not read microcode: %d\n", ret); - mutex_unlock(&priv->mutex); - goto out_release_irq; - } - } - - ret = __iwl3945_up(priv); - - mutex_unlock(&priv->mutex); - - if (ret) - goto out_release_irq; - - IWL_DEBUG_INFO(priv, "Start UP work.\n"); - - /* Wait for START_ALIVE from ucode. Otherwise callbacks from - * mac80211 will not be run successfully. */ - ret = wait_event_timeout(priv->wait_command_queue, - test_bit(STATUS_READY, &priv->status), - UCODE_READY_TIMEOUT); - if (!ret) { - if (!test_bit(STATUS_READY, &priv->status)) { - IWL_ERR(priv, - "Wait for START_ALIVE timeout after %dms.\n", - jiffies_to_msecs(UCODE_READY_TIMEOUT)); - ret = -ETIMEDOUT; - goto out_release_irq; - } - } - - /* ucode is running and will send rfkill notifications, - * no need to poll the killswitch state anymore */ - cancel_delayed_work(&priv->_3945.rfkill_poll); - - priv->is_open = 1; - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; - -out_release_irq: - priv->is_open = 0; - IWL_DEBUG_MAC80211(priv, "leave - failed\n"); - return ret; -} - -static void iwl3945_mac_stop(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!priv->is_open) { - IWL_DEBUG_MAC80211(priv, "leave - skip\n"); - return; - } - - priv->is_open = 0; - - iwl3945_down(priv); - - flush_workqueue(priv->workqueue); - - /* start polling the killswitch state again */ - queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, - round_jiffies_relative(2 * HZ)); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - - if (iwl3945_tx_skb(priv, skb)) - dev_kfree_skb_any(skb); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -void iwl3945_config_ap(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_vif *vif = ctx->vif; - int rc = 0; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* The following should be done only at AP bring up */ - if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) { - - /* RXON - unassoc (to set timing command) */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl3945_commit_rxon(priv, ctx); - - /* RXON Timing 
*/ - rc = iwl_legacy_send_rxon_timing(priv, ctx); - if (rc) - IWL_WARN(priv, "REPLY_RXON_TIMING failed - " - "Attempting to continue.\n"); - - ctx->staging.assoc_id = 0; - - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= - RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= - ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.use_short_slot) - ctx->staging.flags |= - RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= - ~RXON_FLG_SHORT_SLOT_MSK; - } - /* restore RXON assoc */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - iwl3945_commit_rxon(priv, ctx); - } - iwl3945_send_beacon_cmd(priv); -} - -static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct iwl_priv *priv = hw->priv; - int ret = 0; - u8 sta_id = IWL_INVALID_STATION; - u8 static_key; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (iwl3945_mod_params.sw_crypto) { - IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - /* - * To support IBSS RSN, don't program group keys in IBSS, the - * hardware will then not attempt to decrypt the frames. - */ - if (vif->type == NL80211_IFTYPE_ADHOC && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; - - static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); - - if (!static_key) { - sta_id = iwl_legacy_sta_id_or_broadcast( - priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); - if (sta_id == IWL_INVALID_STATION) - return -EINVAL; - } - - mutex_lock(&priv->mutex); - iwl_legacy_scan_cancel_timeout(priv, 100); - - switch (cmd) { - case SET_KEY: - if (static_key) - ret = iwl3945_set_static_key(priv, key); - else - ret = iwl3945_set_dynamic_key(priv, key, sta_id); - IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (static_key) - ret = iwl3945_remove_static_key(priv); - else - ret = iwl3945_clear_sta_key_info(priv, sta_id); - IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv; - int ret; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - u8 sta_id; - - IWL_DEBUG_INFO(priv, "received request to add station %pM\n", - sta->addr); - mutex_lock(&priv->mutex); - IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", - sta->addr); - sta_priv->common.sta_id = IWL_INVALID_STATION; - - - ret = iwl_legacy_add_station_common(priv, - &priv->contexts[IWL_RXON_CTX_BSS], - sta->addr, is_ap, sta, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM (%d)\n", - sta->addr, ret); - /* Should we return success if return code is EEXIST ? 
*/ - mutex_unlock(&priv->mutex); - return ret; - } - - sta_priv->common.sta_id = sta_id; - - /* Initialize rate scaling */ - IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", - sta->addr); - iwl3945_rs_rate_init(priv, sta, sta_id); - mutex_unlock(&priv->mutex); - - return 0; -} - -static void iwl3945_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&priv->mutex); - - ctx->staging.filter_flags &= ~filter_nand; - ctx->staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but even if hw is ready, committing here breaks for some reason, - * we'll eventually commit the filter flags change anyway. - */ - - mutex_unlock(&priv->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_legacy_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. - */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - - -/***************************************************************************** - * - * sysfs attributes - * - *****************************************************************************/ - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - -/* - * The following adds a new attribute to the sysfs representation - * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) - * used for controlling the debug level. - * - * See the level definitions in iwl for details. - * - * The debug_level being managed using sysfs below is a per device debug - * level that is used instead of the global debug level if it (the per - * device debug level) is set. 
- */ -static ssize_t iwl3945_show_debug_level(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); -} -static ssize_t iwl3945_store_debug_level(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 0, &val); - if (ret) - IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); - else { - priv->debug_level = val; - if (iwl_legacy_alloc_traffic_mem(priv)) - IWL_ERR(priv, - "Not enough memory to generate traffic log\n"); - } - return strnlen(buf, count); -} - -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, - iwl3945_show_debug_level, iwl3945_store_debug_level); - -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ - -static ssize_t iwl3945_show_temperature(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); -} - -static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL); - -static ssize_t iwl3945_show_tx_power(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - return sprintf(buf, "%d\n", priv->tx_power_user_lmt); -} - -static ssize_t iwl3945_store_tx_power(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - char *p = (char *)buf; - u32 val; - - val = simple_strtoul(p, &p, 10); - if (p == buf) - IWL_INFO(priv, ": %s is not in decimal form.\n", buf); - else - iwl3945_hw_reg_set_txpower(priv, val); - - return count; -} - -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power); - -static ssize_t iwl3945_show_flags(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - return sprintf(buf, "0x%04X\n", ctx->active.flags); -} - -static ssize_t iwl3945_store_flags(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - u32 flags = simple_strtoul(buf, NULL, 0); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - mutex_lock(&priv->mutex); - if (le32_to_cpu(ctx->staging.flags) != flags) { - /* Cancel any currently running scans... 
*/ - if (iwl_legacy_scan_cancel_timeout(priv, 100)) - IWL_WARN(priv, "Could not cancel scan.\n"); - else { - IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", - flags); - ctx->staging.flags = cpu_to_le32(flags); - iwl3945_commit_rxon(priv, ctx); - } - } - mutex_unlock(&priv->mutex); - - return count; -} - -static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags); - -static ssize_t iwl3945_show_filter_flags(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - return sprintf(buf, "0x%04X\n", - le32_to_cpu(ctx->active.filter_flags)); -} - -static ssize_t iwl3945_store_filter_flags(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u32 filter_flags = simple_strtoul(buf, NULL, 0); - - mutex_lock(&priv->mutex); - if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { - /* Cancel any currently running scans... */ - if (iwl_legacy_scan_cancel_timeout(priv, 100)) - IWL_WARN(priv, "Could not cancel scan.\n"); - else { - IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " - "0x%04X\n", filter_flags); - ctx->staging.filter_flags = - cpu_to_le32(filter_flags); - iwl3945_commit_rxon(priv, ctx); - } - } - mutex_unlock(&priv->mutex); - - return count; -} - -static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags, - iwl3945_store_filter_flags); - -static ssize_t iwl3945_show_measurement(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - struct iwl_spectrum_notification measure_report; - u32 size = sizeof(measure_report), len = 0, ofs = 0; - u8 *data = (u8 *)&measure_report; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - if (!(priv->measurement_status & MEASUREMENT_READY)) { - spin_unlock_irqrestore(&priv->lock, flags); - return 0; - } - memcpy(&measure_report, &priv->measure_report, size); - priv->measurement_status = 0; - spin_unlock_irqrestore(&priv->lock, flags); - - while (size && (PAGE_SIZE - len)) { - hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, - PAGE_SIZE - len, 1); - len = strlen(buf); - if (PAGE_SIZE - len) - buf[len++] = '\n'; - - ofs += 16; - size -= min(size, 16U); - } - - return len; -} - -static ssize_t iwl3945_store_measurement(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_measurement_params params = { - .channel = le16_to_cpu(ctx->active.channel), - .start_time = cpu_to_le64(priv->_3945.last_tsf), - .duration = cpu_to_le16(1), - }; - u8 type = IWL_MEASURE_BASIC; - u8 buffer[32]; - u8 channel; - - if (count) { - char *p = buffer; - strncpy(buffer, buf, min(sizeof(buffer), count)); - channel = simple_strtoul(p, NULL, 0); - if (channel) - params.channel = channel; - - p = buffer; - while (*p && *p != ' ') - p++; - if (*p) - type = simple_strtoul(p + 1, NULL, 0); - } - - IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " - "channel %d (for '%s')\n", type, params.channel, buf); - iwl3945_get_measurement(priv, &params, type); - - return count; -} - -static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, - iwl3945_show_measurement, iwl3945_store_measurement); - -static ssize_t
iwl3945_store_retry_rate(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - - priv->retry_rate = simple_strtoul(buf, NULL, 0); - if (priv->retry_rate <= 0) - priv->retry_rate = 1; - - return count; -} - -static ssize_t iwl3945_show_retry_rate(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - return sprintf(buf, "%d", priv->retry_rate); -} - -static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate, - iwl3945_store_retry_rate); - - -static ssize_t iwl3945_show_channels(struct device *d, - struct device_attribute *attr, char *buf) -{ - /* all this shit doesn't belong into sysfs anyway */ - return 0; -} - -static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL); - -static ssize_t iwl3945_show_antenna(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); -} - -static ssize_t iwl3945_store_antenna(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); - int ant; - - if (count == 0) - return 0; - - if (sscanf(buf, "%1i", &ant) != 1) { - IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); - return count; - } - - if ((ant >= 0) && (ant <= 2)) { - IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); - iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; - } else - IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); - - - return count; -} - -static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna); - -static ssize_t iwl3945_show_status(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - return sprintf(buf, "0x%08x\n", (int)priv->status); -} - -static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL); - -static ssize_t iwl3945_dump_error_log(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - char *p = (char *)buf; - - if (p[0] == '1') - iwl3945_dump_nic_error_log(priv); - - return strnlen(buf, count); -} - -static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log); - -/***************************************************************************** - * - * driver setup and tear down - * - *****************************************************************************/ - -static void iwl3945_setup_deferred_work(struct iwl_priv *priv) -{ - priv->workqueue = create_singlethread_workqueue(DRV_NAME); - - init_waitqueue_head(&priv->wait_command_queue); - - INIT_WORK(&priv->restart, iwl3945_bg_restart); - INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); - INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); - INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); - INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); - - iwl_legacy_setup_scan_deferred_work(priv); - - iwl3945_hw_setup_deferred_work(priv); - - init_timer(&priv->watchdog); - priv->watchdog.data = (unsigned long)priv; - priv->watchdog.function = iwl_legacy_bg_watchdog; - - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) - iwl3945_irq_tasklet, (unsigned long)priv); -} - -static void 
iwl3945_cancel_deferred_work(struct iwl_priv *priv) -{ - iwl3945_hw_cancel_deferred_work(priv); - - cancel_delayed_work_sync(&priv->init_alive_start); - cancel_delayed_work(&priv->alive_start); - - iwl_legacy_cancel_scan_deferred_work(priv); -} - -static struct attribute *iwl3945_sysfs_entries[] = { - &dev_attr_antenna.attr, - &dev_attr_channels.attr, - &dev_attr_dump_errors.attr, - &dev_attr_flags.attr, - &dev_attr_filter_flags.attr, - &dev_attr_measurement.attr, - &dev_attr_retry_rate.attr, - &dev_attr_status.attr, - &dev_attr_temperature.attr, - &dev_attr_tx_power.attr, -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - &dev_attr_debug_level.attr, -#endif - NULL -}; - -static struct attribute_group iwl3945_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = iwl3945_sysfs_entries, -}; - -struct ieee80211_ops iwl3945_hw_ops = { - .tx = iwl3945_mac_tx, - .start = iwl3945_mac_start, - .stop = iwl3945_mac_stop, - .add_interface = iwl_legacy_mac_add_interface, - .remove_interface = iwl_legacy_mac_remove_interface, - .change_interface = iwl_legacy_mac_change_interface, - .config = iwl_legacy_mac_config, - .configure_filter = iwl3945_configure_filter, - .set_key = iwl3945_mac_set_key, - .conf_tx = iwl_legacy_mac_conf_tx, - .reset_tsf = iwl_legacy_mac_reset_tsf, - .bss_info_changed = iwl_legacy_mac_bss_info_changed, - .hw_scan = iwl_legacy_mac_hw_scan, - .sta_add = iwl3945_mac_sta_add, - .sta_remove = iwl_legacy_mac_sta_remove, - .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, -}; - -static int iwl3945_init_drv(struct iwl_priv *priv) -{ - int ret; - struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; - - priv->retry_rate = 1; - priv->beacon_skb = NULL; - - spin_lock_init(&priv->sta_lock); - spin_lock_init(&priv->hcmd_lock); - - INIT_LIST_HEAD(&priv->free_frames); - - mutex_init(&priv->mutex); - - priv->ieee_channels = NULL; - priv->ieee_rates = NULL; - priv->band = IEEE80211_BAND_2GHZ; - - priv->iw_mode = NL80211_IFTYPE_STATION; - priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; - - /* initialize force reset */ - priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; - - if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { - IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", - eeprom->version); - ret = -EINVAL; - goto err; - } - ret = iwl_legacy_init_channel_map(priv); - if (ret) { - IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); - goto err; - } - - /* Set up txpower settings in driver for all channels */ - if (iwl3945_txpower_set_from_eeprom(priv)) { - ret = -EIO; - goto err_free_channel_map; - } - - ret = iwl_legacy_init_geos(priv); - if (ret) { - IWL_ERR(priv, "initializing geos failed: %d\n", ret); - goto err_free_channel_map; - } - iwl3945_init_hw_rates(priv, priv->ieee_rates); - - return 0; - -err_free_channel_map: - iwl_legacy_free_channel_map(priv); -err: - return ret; -} - -#define IWL3945_MAX_PROBE_REQUEST 200 - -static int iwl3945_setup_mac(struct iwl_priv *priv) -{ - int ret; - struct ieee80211_hw *hw = priv->hw; - - hw->rate_control_algorithm = "iwl-3945-rs"; - hw->sta_data_size = sizeof(struct iwl3945_sta_priv); - hw->vif_data_size = sizeof(struct iwl_vif_priv); - - /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_SPECTRUM_MGMT; - - hw->wiphy->interface_modes = - priv->contexts[IWL_RXON_CTX_BSS].interface_modes; - - hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | - WIPHY_FLAG_DISABLE_BEACON_HINTS | - WIPHY_FLAG_IBSS_RSN; - - hw->wiphy->max_scan_ssids = 
PROBE_OPTION_MAX_3945; - /* we create the 802.11 header and a zero-length SSID element */ - hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2; - - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; - - if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->bands[IEEE80211_BAND_2GHZ]; - - if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->bands[IEEE80211_BAND_5GHZ]; - - iwl_legacy_leds_init(priv); - - ret = ieee80211_register_hw(priv->hw); - if (ret) { - IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); - return ret; - } - priv->mac80211_registered = 1; - - return 0; -} - -static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int err = 0, i; - struct iwl_priv *priv; - struct ieee80211_hw *hw; - struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); - struct iwl3945_eeprom *eeprom; - unsigned long flags; - - /*********************** - * 1. Allocating HW data - * ********************/ - - /* mac80211 allocates memory for this device instance, including - * space for this driver's private structure */ - hw = iwl_legacy_alloc_all(cfg); - if (hw == NULL) { - pr_err("Can not allocate network device\n"); - err = -ENOMEM; - goto out; - } - priv = hw->priv; - SET_IEEE80211_DEV(hw, &pdev->dev); - - priv->cmd_queue = IWL39_CMD_QUEUE_NUM; - - /* 3945 has only one valid context */ - priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); - - for (i = 0; i < NUM_IWL_RXON_CTX; i++) - priv->contexts[i].ctxid = i; - - priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; - priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; - priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; - priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; - priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; - priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; - priv->contexts[IWL_RXON_CTX_BSS].interface_modes = - BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC); - priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; - priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; - priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; - - /* - * Disabling hardware scan means that mac80211 will perform scans - * "the hard way", rather than using device's scan. - */ - if (iwl3945_mod_params.disable_hw_scan) { - IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); - iwl3945_hw_ops.hw_scan = NULL; - } - - IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); - priv->cfg = cfg; - priv->pci_dev = pdev; - priv->inta_mask = CSR_INI_SET_MASK; - - if (iwl_legacy_alloc_traffic_mem(priv)) - IWL_ERR(priv, "Not enough memory to generate traffic log\n"); - - /*************************** - * 2. Initializing PCI bus - * *************************/ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - if (pci_enable_device(pdev)) { - err = -ENODEV; - goto out_ieee80211_free_hw; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - IWL_WARN(priv, "No suitable DMA available.\n"); - goto out_pci_disable_device; - } - - pci_set_drvdata(pdev, priv); - err = pci_request_regions(pdev, DRV_NAME); - if (err) - goto out_pci_disable_device; - - /*********************** - * 3. 
Read REV Register - * ********************/ - priv->hw_base = pci_iomap(pdev, 0, 0); - if (!priv->hw_base) { - err = -ENODEV; - goto out_pci_release_regions; - } - - IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", - (unsigned long long) pci_resource_len(pdev, 0)); - IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); - - /* We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state */ - pci_write_config_byte(pdev, 0x41, 0x00); - - /* these spin locks will be used in apm_ops.init and EEPROM access - * we should init now - */ - spin_lock_init(&priv->reg_lock); - spin_lock_init(&priv->lock); - - /* - * stop and reset the on-board processor just in case it is in a - * strange state ... like being left stranded by a primary kernel - * and this is now the kdump kernel trying to start up - */ - iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /*********************** - * 4. Read EEPROM - * ********************/ - - /* Read the EEPROM */ - err = iwl_legacy_eeprom_init(priv); - if (err) { - IWL_ERR(priv, "Unable to init EEPROM\n"); - goto out_iounmap; - } - /* MAC Address location in EEPROM same for 3945/4965 */ - eeprom = (struct iwl3945_eeprom *)priv->eeprom; - IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); - SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); - - /*********************** - * 5. Setup HW Constants - * ********************/ - /* Device-specific setup */ - if (iwl3945_hw_set_hw_params(priv)) { - IWL_ERR(priv, "failed to set hw settings\n"); - goto out_eeprom_free; - } - - /*********************** - * 6. Setup priv - * ********************/ - - err = iwl3945_init_drv(priv); - if (err) { - IWL_ERR(priv, "initializing driver failed\n"); - goto out_unset_hw_params; - } - - IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", - priv->cfg->name); - - /*********************** - * 7. Setup Services - * ********************/ - - spin_lock_irqsave(&priv->lock, flags); - iwl_legacy_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - pci_enable_msi(priv->pci_dev); - - err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, - IRQF_SHARED, DRV_NAME, priv); - if (err) { - IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); - goto out_disable_msi; - } - - err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); - if (err) { - IWL_ERR(priv, "failed to create sysfs device attributes\n"); - goto out_release_irq; - } - - iwl_legacy_set_rxon_channel(priv, - &priv->bands[IEEE80211_BAND_2GHZ].channels[5], - &priv->contexts[IWL_RXON_CTX_BSS]); - iwl3945_setup_deferred_work(priv); - iwl3945_setup_rx_handlers(priv); - iwl_legacy_power_initialize(priv); - - /********************************* - * 8. Setup and Register mac80211 - * *******************************/ - - iwl_legacy_enable_interrupts(priv); - - err = iwl3945_setup_mac(priv); - if (err) - goto out_remove_sysfs; - - err = iwl_legacy_dbgfs_register(priv, DRV_NAME); - if (err) - IWL_ERR(priv, "failed to create debugfs files. 
Ignoring error: %d\n", err); - - /* Start monitoring the killswitch */ - queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, - 2 * HZ); - - return 0; - - out_remove_sysfs: - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); - out_release_irq: - free_irq(priv->pci_dev->irq, priv); - out_disable_msi: - pci_disable_msi(priv->pci_dev); - iwl_legacy_free_geos(priv); - iwl_legacy_free_channel_map(priv); - out_unset_hw_params: - iwl3945_unset_hw_params(priv); - out_eeprom_free: - iwl_legacy_eeprom_free(priv); - out_iounmap: - pci_iounmap(pdev, priv->hw_base); - out_pci_release_regions: - pci_release_regions(pdev); - out_pci_disable_device: - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); - out_ieee80211_free_hw: - iwl_legacy_free_traffic_mem(priv); - ieee80211_free_hw(priv->hw); - out: - return err; -} - -static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) -{ - struct iwl_priv *priv = pci_get_drvdata(pdev); - unsigned long flags; - - if (!priv) - return; - - IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); - - iwl_legacy_dbgfs_unregister(priv); - - set_bit(STATUS_EXIT_PENDING, &priv->status); - - iwl_legacy_leds_exit(priv); - - if (priv->mac80211_registered) { - ieee80211_unregister_hw(priv->hw); - priv->mac80211_registered = 0; - } else { - iwl3945_down(priv); - } - - /* - * Make sure device is reset to low power before unloading driver. - * This may be redundant with iwl_down(), but there are paths to - * run iwl_down() without calling apm_ops.stop(), and there are - * paths to avoid running iwl_down() at all before leaving driver. - * This (inexpensive) call *makes sure* device is reset. - */ - iwl_legacy_apm_stop(priv); - - /* make sure we flush any pending irq or - * tasklet for the driver - */ - spin_lock_irqsave(&priv->lock, flags); - iwl_legacy_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - iwl3945_synchronize_irq(priv); - - sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); - - cancel_delayed_work_sync(&priv->_3945.rfkill_poll); - - iwl3945_dealloc_ucode_pci(priv); - - if (priv->rxq.bd) - iwl3945_rx_queue_free(priv, &priv->rxq); - iwl3945_hw_txq_ctx_free(priv); - - iwl3945_unset_hw_params(priv); - - /*netif_stop_queue(dev); */ - flush_workqueue(priv->workqueue); - - /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes - * priv->workqueue... so we can't take down the workqueue - * until now... 
*/ - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - iwl_legacy_free_traffic_mem(priv); - - free_irq(pdev->irq, priv); - pci_disable_msi(pdev); - - pci_iounmap(pdev, priv->hw_base); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - iwl_legacy_free_channel_map(priv); - iwl_legacy_free_geos(priv); - kfree(priv->scan_cmd); - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - - ieee80211_free_hw(priv->hw); -} - - -/***************************************************************************** - * - * driver and module entry point - * - *****************************************************************************/ - -static struct pci_driver iwl3945_driver = { - .name = DRV_NAME, - .id_table = iwl3945_hw_card_ids, - .probe = iwl3945_pci_probe, - .remove = __devexit_p(iwl3945_pci_remove), - .driver.pm = IWL_LEGACY_PM_OPS, -}; - -static int __init iwl3945_init(void) -{ - - int ret; - pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); - pr_info(DRV_COPYRIGHT "\n"); - - ret = iwl3945_rate_control_register(); - if (ret) { - pr_err("Unable to register rate control algorithm: %d\n", ret); - return ret; - } - - ret = pci_register_driver(&iwl3945_driver); - if (ret) { - pr_err("Unable to initialize PCI module\n"); - goto error_register; - } - - return ret; - -error_register: - iwl3945_rate_control_unregister(); - return ret; -} - -static void __exit iwl3945_exit(void) -{ - pci_unregister_driver(&iwl3945_driver); - iwl3945_rate_control_unregister(); -} - -MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); - -module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); -MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); -module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); -MODULE_PARM_DESC(swcrypto, - "using software crypto (default 1 [software])"); -module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, - int, S_IRUGO); -MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif -module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO); -MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); - -module_exit(iwl3945_exit); -module_init(iwl3945_init); diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c deleted file mode 100644 index d2fba9eae15..00000000000 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ /dev/null @@ -1,3281 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/pci-aspm.h> -#include <linux/slab.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/firmware.h> -#include <linux/etherdevice.h> -#include <linux/if_arp.h> - -#include <net/mac80211.h> - -#include <asm/div64.h> - -#define DRV_NAME "iwl4965" - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-helpers.h" -#include "iwl-sta.h" -#include "iwl-4965-calib.h" -#include "iwl-4965.h" -#include "iwl-4965-led.h" - - -/****************************************************************************** - * - * module boiler plate - * - ******************************************************************************/ - -/* - * module name, copyright, version, etc. - */ -#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -#define VD "d" -#else -#define VD -#endif - -#define DRV_VERSION IWLWIFI_VERSION VD - - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("iwl4965"); - -void iwl4965_update_chain_flags(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - - if (priv->cfg->ops->hcmd->set_rxon_chain) { - for_each_context(priv, ctx) { - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - if (ctx->active.rx_chain != ctx->staging.rx_chain) - iwl_legacy_commit_rxon(priv, ctx); - } - } -} - -static void iwl4965_clear_free_frames(struct iwl_priv *priv) -{ - struct list_head *element; - - IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", - priv->frames_count); - - while (!list_empty(&priv->free_frames)) { - element = priv->free_frames.next; - list_del(element); - kfree(list_entry(element, struct iwl_frame, list)); - priv->frames_count--; - } - - if (priv->frames_count) { - IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", - priv->frames_count); - priv->frames_count = 0; - } -} - -static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv) -{ - struct iwl_frame *frame; - struct list_head *element; - if (list_empty(&priv->free_frames)) { - frame = kzalloc(sizeof(*frame), GFP_KERNEL); - if (!frame) { - IWL_ERR(priv, "Could not allocate frame!\n"); - return NULL; - } - - priv->frames_count++; - return frame; - } - - element = priv->free_frames.next; - list_del(element); - return list_entry(element, struct iwl_frame, list); -} - -static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) -{ - memset(frame, 0, sizeof(*frame)); - list_add(&frame->list, &priv->free_frames); -} - -static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - int left) -{ - lockdep_assert_held(&priv->mutex); - - if (!priv->beacon_skb) - return 0; - - if (priv->beacon_skb->len > left) - return 0; - - memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); - - return priv->beacon_skb->len; -} - -/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ -static void iwl4965_set_beacon_tim(struct iwl_priv *priv, - struct iwl_tx_beacon_cmd *tx_beacon_cmd, - u8 *beacon, u32 frame_size) -{ - u16 tim_idx; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; - - /* - * The index is relative to frame start but we start looking at the - * variable-length part of the beacon. - */ - tim_idx = mgmt->u.beacon.variable - beacon; - - /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ - while ((tim_idx < (frame_size - 2)) && - (beacon[tim_idx] != WLAN_EID_TIM)) - tim_idx += beacon[tim_idx+1] + 2; - - /* If TIM field was found, set variables */ - if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { - tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); - tx_beacon_cmd->tim_size = beacon[tim_idx+1]; - } else - IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); -} - -static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, - struct iwl_frame *frame) -{ - struct iwl_tx_beacon_cmd *tx_beacon_cmd; - u32 frame_size; - u32 rate_flags; - u32 rate; - /* - * We have to set up the TX command, the TX Beacon command, and the - * beacon contents. 
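iwl4965_set_beacon_tim() above locates the TIM element by walking the beacon's information elements, each a tag/length/value triple, advancing by length + 2 until tag WLAN_EID_TIM is found. A stand-alone sketch of that walk over a hand-built element buffer (the demo_* names and the sample bytes are illustrative, not taken from the driver):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DEMO_EID_TIM 5	/* WLAN_EID_TIM in the kernel headers */

/* Walk tag/length/value elements; return the offset of the TIM element,
 * or a negative value if it is not present.  Mirrors the bounds checks
 * used above: tag and length must both fit before dereferencing. */
static int demo_find_tim(const uint8_t *ies, size_t len)
{
	size_t idx = 0;

	if (len < 2)
		return -1;

	while (idx < len - 2 && ies[idx] != DEMO_EID_TIM)
		idx += ies[idx + 1] + 2;

	if (idx < len - 1 && ies[idx] == DEMO_EID_TIM)
		return (int)idx;
	return -1;
}

int main(void)
{
	/* SSID "ap" (tag 0), supported rates (tag 1), then a 4-byte TIM. */
	const uint8_t ies[] = {
		0, 2, 'a', 'p',
		1, 3, 0x82, 0x84, 0x8b,
		5, 4, 0x00, 0x02, 0x00, 0x00,
	};
	int ofs = demo_find_tim(ies, sizeof(ies));

	if (ofs >= 0)
		printf("TIM at offset %d, length %u\n", ofs, (unsigned)ies[ofs + 1]);
	return 0;
}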
- */ - - lockdep_assert_held(&priv->mutex); - - if (!priv->beacon_ctx) { - IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); - return 0; - } - - /* Initialize memory */ - tx_beacon_cmd = &frame->u.beacon; - memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); - - /* Set up TX beacon contents */ - frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame, - sizeof(frame->u) - sizeof(*tx_beacon_cmd)); - if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) - return 0; - if (!frame_size) - return 0; - - /* Set up TX command fields */ - tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); - tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; - tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | - TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; - - /* Set up TX beacon command fields */ - iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, - frame_size); - - /* Set up packet rate and flags */ - rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx); - priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, - priv->hw_params.valid_tx_ant); - rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); - if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, - rate_flags); - - return sizeof(*tx_beacon_cmd) + frame_size; -} - -int iwl4965_send_beacon_cmd(struct iwl_priv *priv) -{ - struct iwl_frame *frame; - unsigned int frame_size; - int rc; - - frame = iwl4965_get_free_frame(priv); - if (!frame) { - IWL_ERR(priv, "Could not obtain free frame buffer for beacon " - "command.\n"); - return -ENOMEM; - } - - frame_size = iwl4965_hw_get_beacon_cmd(priv, frame); - if (!frame_size) { - IWL_ERR(priv, "Error configuring the beacon command\n"); - iwl4965_free_frame(priv, frame); - return -EINVAL; - } - - rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, - &frame->u.cmd[0]); - - iwl4965_free_frame(priv, frame); - - return rc; -} - -static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - dma_addr_t addr = get_unaligned_le32(&tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - addr |= - ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; - - return addr; -} - -static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - -static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, - dma_addr_t addr, u16 len) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - hi_n_len |= ((addr >> 16) >> 16) & 0xF; - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd->num_tbs = idx + 1; -} - -static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd) -{ - return tfd->num_tbs & 0x1f; -} - -/** - * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @priv - driver private data - * @txq - tx queue - * - * Does NOT advance any TFD circular buffer read/write indexes - * Does NOT free the TFD itself (which is within circular buffer) - */ -void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) -{ - struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; - struct iwl_tfd *tfd; - struct pci_dev *dev = priv->pci_dev; - int index = txq->q.read_ptr; - int i; - int 
num_tbs; - - tfd = &tfd_tmp[index]; - - /* Sanity check on number of chunks */ - num_tbs = iwl4965_tfd_get_num_tbs(tfd); - - if (num_tbs >= IWL_NUM_OF_TBS) { - IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* Unmap tx_cmd */ - if (num_tbs) - pci_unmap_single(dev, - dma_unmap_addr(&txq->meta[index], mapping), - dma_unmap_len(&txq->meta[index], len), - PCI_DMA_BIDIRECTIONAL); - - /* Unmap chunks, if any. */ - for (i = 1; i < num_tbs; i++) - pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i), - iwl4965_tfd_tb_get_len(tfd, i), - PCI_DMA_TODEVICE); - - /* free SKB */ - if (txq->txb) { - struct sk_buff *skb; - - skb = txq->txb[txq->q.read_ptr].skb; - - /* can be called from irqs-disabled context */ - if (skb) { - dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb = NULL; - } - } -} - -int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, - u8 reset, u8 pad) -{ - struct iwl_queue *q; - struct iwl_tfd *tfd, *tfd_tmp; - u32 num_tbs; - - q = &txq->q; - tfd_tmp = (struct iwl_tfd *)txq->tfds; - tfd = &tfd_tmp[q->write_ptr]; - - if (reset) - memset(tfd, 0, sizeof(*tfd)); - - num_tbs = iwl4965_tfd_get_num_tbs(tfd); - - /* Each TFD can point to a maximum 20 Tx buffers */ - if (num_tbs >= IWL_NUM_OF_TBS) { - IWL_ERR(priv, "Error can not send more than %d chunks\n", - IWL_NUM_OF_TBS); - return -EINVAL; - } - - BUG_ON(addr & ~DMA_BIT_MASK(36)); - if (unlikely(addr & ~IWL_TX_DMA_MASK)) - IWL_ERR(priv, "Unaligned address = %llx\n", - (unsigned long long)addr); - - iwl4965_tfd_set_tb(tfd, num_tbs, addr, len); - - return 0; -} - -/* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. - * - * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA - * channels supported in hardware. - */ -int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, - struct iwl_tx_queue *txq) -{ - int txq_id = txq->q.id; - - /* Circular buffer (TFD queue in DRAM) physical base address */ - iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); - - return 0; -} - -/****************************************************************************** - * - * Generic RX handler implementations - * - ******************************************************************************/ -static void iwl4965_rx_reply_alive(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_alive_resp *palive; - struct delayed_work *pwork; - - palive = &pkt->u.alive_frame; - - IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " - "0x%01X 0x%01X\n", - palive->is_valid, palive->ver_type, - palive->ver_subtype); - - if (palive->ver_subtype == INITIALIZE_SUBTYPE) { - IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); - memcpy(&priv->card_alive_init, - &pkt->u.alive_frame, - sizeof(struct iwl_init_alive_resp)); - pwork = &priv->init_alive_start; - } else { - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - memcpy(&priv->card_alive, &pkt->u.alive_frame, - sizeof(struct iwl_alive_resp)); - pwork = &priv->alive_start; - } - - /* We delay the ALIVE response by 5ms to - * give the HW RF Kill time to activate... 
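The TFD helpers a little earlier (iwl4965_tfd_set_tb() and its getters) pack a 36-bit DMA address plus a 12-bit length into a 32-bit lo word and a 16-bit hi_n_len word: address bits 32..35 go in the low nibble of hi_n_len, the length in its upper 12 bits. A small sketch of that packing, with endianness conversions left out and demo_* names standing in for the driver's structures:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone model of one transfer-buffer descriptor as described above. */
struct demo_tb {
	uint32_t lo;		/* low 32 bits of the DMA address */
	uint16_t hi_n_len;	/* [15:4] length, [3:0] address bits 32..35 */
};

static void demo_set_tb(struct demo_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t demo_get_addr(const struct demo_tb *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t demo_get_len(const struct demo_tb *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct demo_tb tb;
	uint64_t addr = 0xADEADBEEFULL;	/* example 36-bit address */

	demo_set_tb(&tb, addr, 128);	/* length must fit in 12 bits */
	printf("addr 0x%llx len %u\n",
	       (unsigned long long)demo_get_addr(&tb),
	       (unsigned)demo_get_len(&tb));
	return 0;
}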
*/ - if (palive->is_valid == UCODE_VALID_OK) - queue_delayed_work(priv->workqueue, pwork, - msecs_to_jiffies(5)); - else - IWL_WARN(priv, "uCode did not respond OK.\n"); -} - -/** - * iwl4965_bg_statistics_periodic - Timer callback to queue statistics - * - * This callback is provided in order to send a statistics request. - * - * This timer function is continually reset to execute within - * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION - * was received. We need to ensure we receive the statistics in order - * to update the temperature used for calibrating the TXPOWER. - */ -static void iwl4965_bg_statistics_periodic(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* dont send host command if rf-kill is on */ - if (!iwl_legacy_is_ready_rf(priv)) - return; - - iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false); -} - -static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl4965_beacon_notif *beacon = - (struct iwl4965_beacon_notif *)pkt->u.raw; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); - - IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " - "tsf %d %d rate %d\n", - le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), - le32_to_cpu(beacon->low_tsf), rate); -#endif - - priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); -} - -static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv) -{ - unsigned long flags; - - IWL_DEBUG_POWER(priv, "Stop all queues\n"); - - if (priv->mac80211_registered) - ieee80211_stop_queues(priv->hw); - - iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - iwl_read32(priv, CSR_UCODE_DRV_GP1); - - spin_lock_irqsave(&priv->reg_lock, flags); - if (!iwl_grab_nic_access(priv)) - iwl_release_nic_access(priv); - spin_unlock_irqrestore(&priv->reg_lock, flags); -} - -/* Handle notification from uCode that card's power state is changing - * due to software, hardware, or critical temperature RFKILL */ -static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); - unsigned long status = priv->status; - - IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_CARD_DISABLED) ? 
- "Reached" : "Not reached"); - - if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | - CT_CARD_DISABLED)) { - - iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - - if (!(flags & RXON_CARD_DISABLED)) { - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - } - } - - if (flags & CT_CARD_DISABLED) - iwl4965_perform_ct_kill_task(priv); - - if (flags & HW_CARD_DISABLED) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - if (!(flags & RXON_CARD_DISABLED)) - iwl_legacy_scan_cancel(priv); - - if ((test_bit(STATUS_RF_KILL_HW, &status) != - test_bit(STATUS_RF_KILL_HW, &priv->status))) - wiphy_rfkill_set_hw_state(priv->hw->wiphy, - test_bit(STATUS_RF_KILL_HW, &priv->status)); - else - wake_up(&priv->wait_command_queue); -} - -/** - * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks - * - * Setup the RX handlers for each of the reply types sent from the uCode - * to the host. - * - * This function chains into the hardware specific files for them to setup - * any hardware specific handlers as well. - */ -static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) -{ - priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; - priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; - priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; - priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = - iwl_legacy_rx_spectrum_measure_notif; - priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; - priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = - iwl_legacy_rx_pm_debug_statistics_notif; - priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; - - /* - * The same handler is used for both the REPLY to a discrete - * statistics request from the host as well as for the periodic - * statistics notifications (after received beacons) from the uCode. - */ - priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics; - priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics; - - iwl_legacy_setup_rx_scan_handlers(priv); - - /* status change handler */ - priv->rx_handlers[CARD_STATE_NOTIFICATION] = - iwl4965_rx_card_state_notif; - - priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = - iwl4965_rx_missed_beacon_notif; - /* Rx handlers */ - priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; - priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; - /* block ack */ - priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; - /* Set up hardware specific Rx handlers */ - priv->cfg->ops->lib->rx_handler_setup(priv); -} - -/** - * iwl4965_rx_handle - Main entry function for receiving responses from uCode - * - * Uses the priv->rx_handlers callback function array to invoke - * the appropriate handlers, including command responses, - * frame-received notifications, and other notifications. - */ -void iwl4965_rx_handle(struct iwl_priv *priv) -{ - struct iwl_rx_mem_buffer *rxb; - struct iwl_rx_packet *pkt; - struct iwl_rx_queue *rxq = &priv->rxq; - u32 r, i; - int reclaim; - unsigned long flags; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; - - /* uCode's read index (stored in shared DRAM) indicates the last Rx - * buffer that the driver may process (last buffer filled by ucode). 
*/ - r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; - i = rxq->read; - - /* Rx interrupt, but nothing sent from uCode */ - if (i == r) - IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); - - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - - while (i != r) { - int len; - - rxb = rxq->queue[i]; - - /* If an RXB doesn't have a Rx queue slot associated with it, - * then a bug has been introduced in the queue refilling - * routines -- catch it here */ - BUG_ON(rxb == NULL); - - rxq->queue[i] = NULL; - - pci_unmap_page(priv->pci_dev, rxb->page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - pkt = rxb_addr(rxb); - - len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - len += sizeof(u32); /* account for status word */ - trace_iwlwifi_legacy_dev_rx(priv, pkt, len); - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. */ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && - (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && - (pkt->hdr.cmd != REPLY_RX) && - (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && - (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && - (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && - (pkt->hdr.cmd != REPLY_TX); - - /* Based on type of command response or notification, - * handle those that need handling via function in - * rx_handlers table. See iwl4965_setup_rx_handlers() */ - if (priv->rx_handlers[pkt->hdr.cmd]) { - IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, - i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), - pkt->hdr.cmd); - priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; - priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); - } else { - /* No handling needed */ - IWL_DEBUG_RX(priv, - "r %d i %d No handler needed for %s, 0x%02x\n", - r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), - pkt->hdr.cmd); - } - - /* - * XXX: After here, we should always check rxb->page - * against NULL before touching it or its virtual - * memory (pkt). Because some rx_handler might have - * already taken or freed the pages. - */ - - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking iwl_legacy_send_cmd() - * as we reclaim the driver command queue */ - if (rxb->page) - iwl_legacy_tx_cmd_complete(priv, rxb); - else - IWL_WARN(priv, "Claim null rxb?\n"); - } - - /* Reuse the page if possible. For notification packets and - * SKBs that fail to Rx correctly, add them back into the - * rx_free list for reuse later. */ - spin_lock_irqsave(&rxq->lock, flags); - if (rxb->page != NULL) { - rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, - 0, PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - } else - list_add_tail(&rxb->list, &rxq->rx_used); - - spin_unlock_irqrestore(&rxq->lock, flags); - - i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert. 
*/ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - iwl4965_rx_replenish_now(priv); - count = 0; - } - } - } - - /* Backtrack one entry */ - rxq->read = i; - if (fill_rx) - iwl4965_rx_replenish_now(priv); - else - iwl4965_rx_queue_restock(priv); -} - -/* call this function to flush any scheduled tasklet */ -static inline void iwl4965_synchronize_irq(struct iwl_priv *priv) -{ - /* wait to make sure we flush pending tasklet*/ - synchronize_irq(priv->pci_dev->irq); - tasklet_kill(&priv->irq_tasklet); -} - -static void iwl4965_irq_tasklet(struct iwl_priv *priv) -{ - u32 inta, handled = 0; - u32 inta_fh; - unsigned long flags; - u32 i; -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - u32 inta_mask; -#endif - - spin_lock_irqsave(&priv->lock, flags); - - /* Ack/clear/reset pending uCode interrupts. - * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, - * and will clear only when CSR_FH_INT_STATUS gets cleared. */ - inta = iwl_read32(priv, CSR_INT); - iwl_write32(priv, CSR_INT, inta); - - /* Ack/clear/reset pending flow-handler (DMA) interrupts. - * Any new interrupts that happen after this, either while we're - * in this tasklet, or later, will show up in next ISR/tasklet. */ - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { - /* just for debug */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); - IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", - inta, inta_mask, inta_fh); - } -#endif - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not - * atomic, make sure that inta covers all the interrupts that - * we've discovered, even if FH interrupt came in just after - * reading CSR_INT. */ - if (inta_fh & CSR49_FH_INT_RX_MASK) - inta |= CSR_INT_BIT_FH_RX; - if (inta_fh & CSR49_FH_INT_TX_MASK) - inta |= CSR_INT_BIT_FH_TX; - - /* Now service all interrupt bits discovered above. */ - if (inta & CSR_INT_BIT_HW_ERR) { - IWL_ERR(priv, "Hardware error detected. Restarting.\n"); - - /* Tell the device to stop sending interrupts */ - iwl_legacy_disable_interrupts(priv); - - priv->isr_stats.hw++; - iwl_legacy_irq_handle_error(priv); - - handled |= CSR_INT_BIT_HW_ERR; - - return; - } - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " - "the frame/frames.\n"); - priv->isr_stats.sch++; - } - - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - IWL_DEBUG_ISR(priv, "Alive interrupt\n"); - priv->isr_stats.alive++; - } - } -#endif - /* Safely ignore these bits for debug checks below */ - inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); - - /* HW RF KILL switch toggled */ - if (inta & CSR_INT_BIT_RF_KILL) { - int hw_rf_kill = 0; - if (!(iwl_read32(priv, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) - hw_rf_kill = 1; - - IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", - hw_rf_kill ? "disable radio" : "enable radio"); - - priv->isr_stats.rfkill++; - - /* driver only loads ucode once setting the interface up. - * the driver allows loading the ucode even if the radio - * is killed. Hence update the killswitch state here. The - * rfkill handler will care about restarting if needed. 
- */ - if (!test_bit(STATUS_ALIVE, &priv->status)) { - if (hw_rf_kill) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); - } - - handled |= CSR_INT_BIT_RF_KILL; - } - - /* Chip got too hot and stopped itself */ - if (inta & CSR_INT_BIT_CT_KILL) { - IWL_ERR(priv, "Microcode CT kill error detected.\n"); - priv->isr_stats.ctkill++; - handled |= CSR_INT_BIT_CT_KILL; - } - - /* Error detected by uCode */ - if (inta & CSR_INT_BIT_SW_ERR) { - IWL_ERR(priv, "Microcode SW error detected. " - " Restarting 0x%X.\n", inta); - priv->isr_stats.sw++; - iwl_legacy_irq_handle_error(priv); - handled |= CSR_INT_BIT_SW_ERR; - } - - /* - * uCode wakes up after power-down sleep. - * Tell device about any new tx or host commands enqueued, - * and about any Rx buffers made available while asleep. - */ - if (inta & CSR_INT_BIT_WAKEUP) { - IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); - iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); - for (i = 0; i < priv->hw_params.max_txq_num; i++) - iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]); - priv->isr_stats.wakeup++; - handled |= CSR_INT_BIT_WAKEUP; - } - - /* All uCode command responses, including Tx command responses, - * Rx "responses" (frame-received notification), and other - * notifications from uCode come through here*/ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { - iwl4965_rx_handle(priv); - priv->isr_stats.rx++; - handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); - } - - /* This "Tx" DMA channel is used only for loading uCode */ - if (inta & CSR_INT_BIT_FH_TX) { - IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); - priv->isr_stats.tx++; - handled |= CSR_INT_BIT_FH_TX; - /* Wake up uCode load routine, now that load is complete */ - priv->ucode_write_complete = 1; - wake_up(&priv->wait_command_queue); - } - - if (inta & ~handled) { - IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); - priv->isr_stats.unhandled++; - } - - if (inta & ~(priv->inta_mask)) { - IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", - inta & ~priv->inta_mask); - IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); - } - - /* Re-enable all interrupts */ - /* only Re-enable if disabled by irq */ - if (test_bit(STATUS_INT_ENABLED, &priv->status)) - iwl_legacy_enable_interrupts(priv); - /* Re-enable RF_KILL if it occurred */ - else if (handled & CSR_INT_BIT_RF_KILL) - iwl_legacy_enable_rfkill_int(priv); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { - inta = iwl_read32(priv, CSR_INT); - inta_mask = iwl_read32(priv, CSR_INT_MASK); - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - IWL_DEBUG_ISR(priv, - "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " - "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); - } -#endif -} - -/***************************************************************************** - * - * sysfs attributes - * - *****************************************************************************/ - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - -/* - * The following adds a new attribute to the sysfs representation - * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) - * used for controlling the debug level. - * - * See the level definitions in iwl for details. - * - * The debug_level being managed using sysfs below is a per device debug - * level that is used instead of the global debug level if it (the per - * device debug level) is set. 
- */ -static ssize_t iwl4965_show_debug_level(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); -} -static ssize_t iwl4965_store_debug_level(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 0, &val); - if (ret) - IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); - else { - priv->debug_level = val; - if (iwl_legacy_alloc_traffic_mem(priv)) - IWL_ERR(priv, - "Not enough memory to generate traffic log\n"); - } - return strnlen(buf, count); -} - -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, - iwl4965_show_debug_level, iwl4965_store_debug_level); - - -#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ - - -static ssize_t iwl4965_show_temperature(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - - if (!iwl_legacy_is_alive(priv)) - return -EAGAIN; - - return sprintf(buf, "%d\n", priv->temperature); -} - -static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL); - -static ssize_t iwl4965_show_tx_power(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - - if (!iwl_legacy_is_ready_rf(priv)) - return sprintf(buf, "off\n"); - else - return sprintf(buf, "%d\n", priv->tx_power_user_lmt); -} - -static ssize_t iwl4965_store_tx_power(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwl_priv *priv = dev_get_drvdata(d); - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 10, &val); - if (ret) - IWL_INFO(priv, "%s is not in decimal form.\n", buf); - else { - ret = iwl_legacy_set_tx_power(priv, val, false); - if (ret) - IWL_ERR(priv, "failed setting tx power (0x%d).\n", - ret); - else - ret = count; - } - return ret; -} - -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, - iwl4965_show_tx_power, iwl4965_store_tx_power); - -static struct attribute *iwl_sysfs_entries[] = { - &dev_attr_temperature.attr, - &dev_attr_tx_power.attr, -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG - &dev_attr_debug_level.attr, -#endif - NULL -}; - -static struct attribute_group iwl_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = iwl_sysfs_entries, -}; - -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv) -{ - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); - iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); -} - -static void iwl4965_nic_start(struct iwl_priv *priv) -{ - /* Remove all resets to allow NIC to operate */ - iwl_write32(priv, CSR_RESET, 0); -} - -static void iwl4965_ucode_callback(const struct firmware *ucode_raw, - void *context); -static int iwl4965_mac_setup_register(struct iwl_priv *priv, - u32 max_probe_length); - -static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first) -{ - const char *name_pre = priv->cfg->fw_name_pre; - char 
tag[8]; - - if (first) { - priv->fw_index = priv->cfg->ucode_api_max; - sprintf(tag, "%d", priv->fw_index); - } else { - priv->fw_index--; - sprintf(tag, "%d", priv->fw_index); - } - - if (priv->fw_index < priv->cfg->ucode_api_min) { - IWL_ERR(priv, "no suitable firmware found!\n"); - return -ENOENT; - } - - sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); - - IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", - priv->firmware_name); - - return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, - &priv->pci_dev->dev, GFP_KERNEL, priv, - iwl4965_ucode_callback); -} - -struct iwl4965_firmware_pieces { - const void *inst, *data, *init, *init_data, *boot; - size_t inst_size, data_size, init_size, init_data_size, boot_size; -}; - -static int iwl4965_load_firmware(struct iwl_priv *priv, - const struct firmware *ucode_raw, - struct iwl4965_firmware_pieces *pieces) -{ - struct iwl_ucode_header *ucode = (void *)ucode_raw->data; - u32 api_ver, hdr_size; - const u8 *src; - - priv->ucode_ver = le32_to_cpu(ucode->ver); - api_ver = IWL_UCODE_API(priv->ucode_ver); - - switch (api_ver) { - default: - case 0: - case 1: - case 2: - hdr_size = 24; - if (ucode_raw->size < hdr_size) { - IWL_ERR(priv, "File size too small!\n"); - return -EINVAL; - } - pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); - pieces->data_size = le32_to_cpu(ucode->v1.data_size); - pieces->init_size = le32_to_cpu(ucode->v1.init_size); - pieces->init_data_size = - le32_to_cpu(ucode->v1.init_data_size); - pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); - src = ucode->v1.data; - break; - } - - /* Verify size of file vs. image size info in file's header */ - if (ucode_raw->size != hdr_size + pieces->inst_size + - pieces->data_size + pieces->init_size + - pieces->init_data_size + pieces->boot_size) { - - IWL_ERR(priv, - "uCode file size %d does not match expected size\n", - (int)ucode_raw->size); - return -EINVAL; - } - - pieces->inst = src; - src += pieces->inst_size; - pieces->data = src; - src += pieces->data_size; - pieces->init = src; - src += pieces->init_size; - pieces->init_data = src; - src += pieces->init_data_size; - pieces->boot = src; - src += pieces->boot_size; - - return 0; -} - -/** - * iwl4965_ucode_callback - callback when firmware was loaded - * - * If loaded successfully, copies the firmware into buffers - * for the card to fetch (via DMA). 
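iwl4965_load_firmware() above treats a v1/v2 .ucode file as a 24-byte header followed by five images laid out back to back, and rejects it unless the file size equals the header size plus the advertised section sizes. A stand-alone sketch of that validation and pointer slicing (only the consecutive-sections idea is shown; the real header fields are little-endian and defined in the driver's headers):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_pieces {
	const uint8_t *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};

/* Validate "header + consecutive images" and hand out pointers into the
 * blob, mirroring the size check and slicing used above. */
static int demo_slice(const uint8_t *blob, size_t blob_size, size_t hdr_size,
		      struct demo_pieces *p)
{
	const uint8_t *src;

	if (blob_size != hdr_size + p->inst_size + p->data_size +
			 p->init_size + p->init_data_size + p->boot_size)
		return -1;	/* file does not match its header */

	src = blob + hdr_size;
	p->inst = src;		src += p->inst_size;
	p->data = src;		src += p->data_size;
	p->init = src;		src += p->init_size;
	p->init_data = src;	src += p->init_data_size;
	p->boot = src;
	return 0;
}

int main(void)
{
	uint8_t blob[24 + 4 + 2 + 3 + 1 + 2] = { 0 };
	struct demo_pieces p = {
		.inst_size = 4, .data_size = 2, .init_size = 3,
		.init_data_size = 1, .boot_size = 2,
	};

	if (demo_slice(blob, sizeof(blob), 24, &p) == 0)
		printf("boot image starts at offset %td\n", p.boot - blob);
	return 0;
}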
- */ -static void -iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context) -{ - struct iwl_priv *priv = context; - struct iwl_ucode_header *ucode; - int err; - struct iwl4965_firmware_pieces pieces; - const unsigned int api_max = priv->cfg->ucode_api_max; - const unsigned int api_min = priv->cfg->ucode_api_min; - u32 api_ver; - - u32 max_probe_length = 200; - u32 standard_phy_calibration_size = - IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; - - memset(&pieces, 0, sizeof(pieces)); - - if (!ucode_raw) { - if (priv->fw_index <= priv->cfg->ucode_api_max) - IWL_ERR(priv, - "request for firmware file '%s' failed.\n", - priv->firmware_name); - goto try_again; - } - - IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", - priv->firmware_name, ucode_raw->size); - - /* Make sure that we got at least the API version number */ - if (ucode_raw->size < 4) { - IWL_ERR(priv, "File size way too small!\n"); - goto try_again; - } - - /* Data from ucode file: header followed by uCode images */ - ucode = (struct iwl_ucode_header *)ucode_raw->data; - - err = iwl4965_load_firmware(priv, ucode_raw, &pieces); - - if (err) - goto try_again; - - api_ver = IWL_UCODE_API(priv->ucode_ver); - - /* - * api_ver should match the api version forming part of the - * firmware filename ... but we don't check for that and only rely - * on the API version read from firmware header from here on forward - */ - if (api_ver < api_min || api_ver > api_max) { - IWL_ERR(priv, - "Driver unable to support your firmware API. " - "Driver supports v%u, firmware is v%u.\n", - api_max, api_ver); - goto try_again; - } - - if (api_ver != api_max) - IWL_ERR(priv, - "Firmware has old API version. Expected v%u, " - "got v%u. New firmware can be obtained " - "from http://www.intellinuxwireless.org.\n", - api_max, api_ver); - - IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", - IWL_UCODE_MAJOR(priv->ucode_ver), - IWL_UCODE_MINOR(priv->ucode_ver), - IWL_UCODE_API(priv->ucode_ver), - IWL_UCODE_SERIAL(priv->ucode_ver)); - - snprintf(priv->hw->wiphy->fw_version, - sizeof(priv->hw->wiphy->fw_version), - "%u.%u.%u.%u", - IWL_UCODE_MAJOR(priv->ucode_ver), - IWL_UCODE_MINOR(priv->ucode_ver), - IWL_UCODE_API(priv->ucode_ver), - IWL_UCODE_SERIAL(priv->ucode_ver)); - - /* - * For any of the failures below (before allocating pci memory) - * we will try to load a version with a smaller API -- maybe the - * user just got a corrupted version of the latest API. 
- */ - - IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", - priv->ucode_ver); - IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n", - pieces.inst_size); - IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n", - pieces.data_size); - IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n", - pieces.init_size); - IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", - pieces.init_data_size); - IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n", - pieces.boot_size); - - /* Verify that uCode images will fit in card's SRAM */ - if (pieces.inst_size > priv->hw_params.max_inst_size) { - IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", - pieces.inst_size); - goto try_again; - } - - if (pieces.data_size > priv->hw_params.max_data_size) { - IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", - pieces.data_size); - goto try_again; - } - - if (pieces.init_size > priv->hw_params.max_inst_size) { - IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", - pieces.init_size); - goto try_again; - } - - if (pieces.init_data_size > priv->hw_params.max_data_size) { - IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", - pieces.init_data_size); - goto try_again; - } - - if (pieces.boot_size > priv->hw_params.max_bsm_size) { - IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n", - pieces.boot_size); - goto try_again; - } - - /* Allocate ucode buffers for card's bus-master loading ... */ - - /* Runtime instructions and 2 copies of data: - * 1) unmodified from disk - * 2) backup cache for save/restore during power-downs */ - priv->ucode_code.len = pieces.inst_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); - - priv->ucode_data.len = pieces.data_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); - - priv->ucode_data_backup.len = pieces.data_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); - - if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || - !priv->ucode_data_backup.v_addr) - goto err_pci_alloc; - - /* Initialization instructions and data */ - if (pieces.init_size && pieces.init_data_size) { - priv->ucode_init.len = pieces.init_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); - - priv->ucode_init_data.len = pieces.init_data_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); - - if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) - goto err_pci_alloc; - } - - /* Bootstrap (instructions only, no data) */ - if (pieces.boot_size) { - priv->ucode_boot.len = pieces.boot_size; - iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); - - if (!priv->ucode_boot.v_addr) - goto err_pci_alloc; - } - - /* Now that we can no longer fail, copy information */ - - priv->sta_key_max_num = STA_KEY_MAX_NUM; - - /* Copy images into buffers for card's bus-master reads ... 
*/ - - /* Runtime instructions (first block of data in file) */ - IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", - pieces.inst_size); - memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size); - - IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", - priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); - - /* - * Runtime data - * NOTE: Copy into backup buffer will be done in iwl_up() - */ - IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", - pieces.data_size); - memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size); - memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size); - - /* Initialization instructions */ - if (pieces.init_size) { - IWL_DEBUG_INFO(priv, - "Copying (but not loading) init instr len %Zd\n", - pieces.init_size); - memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size); - } - - /* Initialization data */ - if (pieces.init_data_size) { - IWL_DEBUG_INFO(priv, - "Copying (but not loading) init data len %Zd\n", - pieces.init_data_size); - memcpy(priv->ucode_init_data.v_addr, pieces.init_data, - pieces.init_data_size); - } - - /* Bootstrap instructions */ - IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", - pieces.boot_size); - memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); - - /* - * figure out the offset of chain noise reset and gain commands - * base on the size of standard phy calibration commands table size - */ - priv->_4965.phy_calib_chain_noise_reset_cmd = - standard_phy_calibration_size; - priv->_4965.phy_calib_chain_noise_gain_cmd = - standard_phy_calibration_size + 1; - - /************************************************** - * This is still part of probe() in a sense... - * - * 9. Setup and register with mac80211 and debugfs - **************************************************/ - err = iwl4965_mac_setup_register(priv, max_probe_length); - if (err) - goto out_unbind; - - err = iwl_legacy_dbgfs_register(priv, DRV_NAME); - if (err) - IWL_ERR(priv, - "failed to create debugfs files. 
Ignoring error: %d\n", err); - - err = sysfs_create_group(&priv->pci_dev->dev.kobj, - &iwl_attribute_group); - if (err) { - IWL_ERR(priv, "failed to create sysfs device attributes\n"); - goto out_unbind; - } - - /* We have our copies now, allow OS release its copies */ - release_firmware(ucode_raw); - complete(&priv->_4965.firmware_loading_complete); - return; - - try_again: - /* try next, if any */ - if (iwl4965_request_firmware(priv, false)) - goto out_unbind; - release_firmware(ucode_raw); - return; - - err_pci_alloc: - IWL_ERR(priv, "failed to allocate pci memory\n"); - iwl4965_dealloc_ucode_pci(priv); - out_unbind: - complete(&priv->_4965.firmware_loading_complete); - device_release_driver(&priv->pci_dev->dev); - release_firmware(ucode_raw); -} - -static const char * const desc_lookup_text[] = { - "OK", - "FAIL", - "BAD_PARAM", - "BAD_CHECKSUM", - "NMI_INTERRUPT_WDG", - "SYSASSERT", - "FATAL_ERROR", - "BAD_COMMAND", - "HW_ERROR_TUNE_LOCK", - "HW_ERROR_TEMPERATURE", - "ILLEGAL_CHAN_FREQ", - "VCC_NOT_STABLE", - "FH_ERROR", - "NMI_INTERRUPT_HOST", - "NMI_INTERRUPT_ACTION_PT", - "NMI_INTERRUPT_UNKNOWN", - "UCODE_VERSION_MISMATCH", - "HW_ERROR_ABS_LOCK", - "HW_ERROR_CAL_LOCK_FAIL", - "NMI_INTERRUPT_INST_ACTION_PT", - "NMI_INTERRUPT_DATA_ACTION_PT", - "NMI_TRM_HW_ER", - "NMI_INTERRUPT_TRM", - "NMI_INTERRUPT_BREAK_POINT", - "DEBUG_0", - "DEBUG_1", - "DEBUG_2", - "DEBUG_3", -}; - -static struct { char *name; u8 num; } advanced_lookup[] = { - { "NMI_INTERRUPT_WDG", 0x34 }, - { "SYSASSERT", 0x35 }, - { "UCODE_VERSION_MISMATCH", 0x37 }, - { "BAD_COMMAND", 0x38 }, - { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, - { "FATAL_ERROR", 0x3D }, - { "NMI_TRM_HW_ERR", 0x46 }, - { "NMI_INTERRUPT_TRM", 0x4C }, - { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, - { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, - { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, - { "NMI_INTERRUPT_HOST", 0x66 }, - { "NMI_INTERRUPT_ACTION_PT", 0x7C }, - { "NMI_INTERRUPT_UNKNOWN", 0x84 }, - { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, - { "ADVANCED_SYSASSERT", 0 }, -}; - -static const char *iwl4965_desc_lookup(u32 num) -{ - int i; - int max = ARRAY_SIZE(desc_lookup_text); - - if (num < max) - return desc_lookup_text[num]; - - max = ARRAY_SIZE(advanced_lookup) - 1; - for (i = 0; i < max; i++) { - if (advanced_lookup[i].num == num) - break; - } - return advanced_lookup[i].name; -} - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -void iwl4965_dump_nic_error_log(struct iwl_priv *priv) -{ - u32 data2, line; - u32 desc, time, count, base, data1; - u32 blink1, blink2, ilink1, ilink2; - u32 pc, hcmd; - - if (priv->ucode_type == UCODE_INIT) { - base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); - } else { - base = le32_to_cpu(priv->card_alive.error_event_table_ptr); - } - - if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { - IWL_ERR(priv, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, (priv->ucode_type == UCODE_INIT) ? 
"Init" : "RT"); - return; - } - - count = iwl_legacy_read_targ_mem(priv, base); - - if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { - IWL_ERR(priv, "Start IWL Error Log Dump:\n"); - IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", - priv->status, count); - } - - desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32)); - priv->isr_stats.err_code = desc; - pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32)); - blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32)); - blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32)); - ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32)); - ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32)); - data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32)); - data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32)); - line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32)); - time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32)); - hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32)); - - trace_iwlwifi_legacy_dev_ucode_error(priv, desc, - time, data1, data2, line, - blink1, blink2, ilink1, ilink2); - - IWL_ERR(priv, "Desc Time " - "data1 data2 line\n"); - IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", - iwl4965_desc_lookup(desc), desc, time, data1, data2, line); - IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); - IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", - pc, blink1, blink2, ilink1, ilink2, hcmd); -} - -static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) -{ - struct iwl_ct_kill_config cmd; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&priv->lock, flags); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - spin_unlock_irqrestore(&priv->lock, flags); - - cmd.critical_temperature_R = - cpu_to_le32(priv->hw_params.ct_kill_threshold); - - ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); - else - IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " - "succeeded, " - "critical temperature is %d\n", - priv->hw_params.ct_kill_threshold); -} - -static const s8 default_queue_to_tx_fifo[] = { - IWL_TX_FIFO_VO, - IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, - IWL49_CMD_FIFO_NUM, - IWL_TX_FIFO_UNUSED, - IWL_TX_FIFO_UNUSED, -}; - -static int iwl4965_alive_notify(struct iwl_priv *priv) -{ - u32 a; - unsigned long flags; - int i, chan; - u32 reg_val; - - spin_lock_irqsave(&priv->lock, flags); - - /* Clear 4965's internal Tx Scheduler data base */ - priv->scd_base_addr = iwl_legacy_read_prph(priv, - IWL49_SCD_SRAM_BASE_ADDR); - a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; - for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) - iwl_legacy_write_targ_mem(priv, a, 0); - for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) - iwl_legacy_write_targ_mem(priv, a, 0); - for (; a < priv->scd_base_addr + - IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) - iwl_legacy_write_targ_mem(priv, a, 0); - - /* Tel 4965 where to find Tx byte count tables */ - iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, - priv->scd_bc_tbls.dma >> 10); - - /* Enable DMA channel */ - for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) - iwl_legacy_write_direct32(priv, - FH_TCSR_CHNL_TX_CONFIG_REG(chan), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); - - /* Update FH chicken bits */ - reg_val = 
iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); - iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, - reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); - - /* Disable chain mode for all queues */ - iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); - - /* Initialize each Tx queue (including the command queue) */ - for (i = 0; i < priv->hw_params.max_txq_num; i++) { - - /* TFD circular buffer read/write indexes */ - iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); - iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); - - /* Max Tx Window size for Scheduler-ACK mode */ - iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + - IWL49_SCD_CONTEXT_QUEUE_OFFSET(i), - (SCD_WIN_SIZE << - IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & - IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); - - /* Frame limit */ - iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + - IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) + - sizeof(u32), - (SCD_FRAME_LIMIT << - IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); - - } - iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, - (1 << priv->hw_params.max_txq_num) - 1); - - /* Activate all Tx DMA/FIFO channels */ - iwl4965_txq_set_sched(priv, IWL_MASK(0, 6)); - - iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0); - - /* make sure all queue are not stopped */ - memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); - for (i = 0; i < 4; i++) - atomic_set(&priv->queue_stop_count[i], 0); - - /* reset to 0 to enable all the queue first */ - priv->txq_ctx_active_msk = 0; - /* Map each Tx/cmd queue to its corresponding fifo */ - BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); - - for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { - int ac = default_queue_to_tx_fifo[i]; - - iwl_txq_ctx_activate(priv, i); - - if (ac == IWL_TX_FIFO_UNUSED) - continue; - - iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); - } - - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - -/** - * iwl4965_alive_start - called after REPLY_ALIVE notification received - * from protocol/runtime uCode (initialization uCode's - * Alive gets handled by iwl_init_alive_start()). - */ -static void iwl4965_alive_start(struct iwl_priv *priv) -{ - int ret = 0; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - - if (priv->card_alive.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Alive failed.\n"); - goto restart; - } - - /* Initialize uCode has loaded Runtime uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "runtime" alive if code weren't properly loaded. 
*/ - if (iwl4965_verify_ucode(priv)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); - goto restart; - } - - ret = iwl4965_alive_notify(priv); - if (ret) { - IWL_WARN(priv, - "Could not complete ALIVE transition [ntf]: %d\n", ret); - goto restart; - } - - - /* After the ALIVE response, we can send host commands to the uCode */ - set_bit(STATUS_ALIVE, &priv->status); - - /* Enable watchdog to monitor the driver tx queues */ - iwl_legacy_setup_watchdog(priv); - - if (iwl_legacy_is_rfkill(priv)) - return; - - ieee80211_wake_queues(priv->hw); - - priv->active_rate = IWL_RATES_MASK; - - if (iwl_legacy_is_associated_ctx(ctx)) { - struct iwl_legacy_rxon_cmd *active_rxon = - (struct iwl_legacy_rxon_cmd *)&ctx->active; - /* apply any changes in staging */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - } else { - struct iwl_rxon_context *tmp; - /* Initialize our rx_config data */ - for_each_context(priv, tmp) - iwl_legacy_connection_init_rx_config(priv, tmp); - - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - } - - /* Configure bluetooth coexistence if enabled */ - iwl_legacy_send_bt_config(priv); - - iwl4965_reset_run_time_calib(priv); - - set_bit(STATUS_READY, &priv->status); - - /* Configure the adapter for unassociated operation */ - iwl_legacy_commit_rxon(priv, ctx); - - /* At this point, the NIC is initialized and operational */ - iwl4965_rf_kill_ct_config(priv); - - IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); - wake_up(&priv->wait_command_queue); - - iwl_legacy_power_update_mode(priv, true); - IWL_DEBUG_INFO(priv, "Updated power mode\n"); - - return; - - restart: - queue_work(priv->workqueue, &priv->restart); -} - -static void iwl4965_cancel_deferred_work(struct iwl_priv *priv); - -static void __iwl4965_down(struct iwl_priv *priv) -{ - unsigned long flags; - int exit_pending; - - IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); - - iwl_legacy_scan_cancel_timeout(priv, 200); - - exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); - - /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set - * to prevent rearm timer */ - del_timer_sync(&priv->watchdog); - - iwl_legacy_clear_ucode_stations(priv, NULL); - iwl_legacy_dealloc_bcast_stations(priv); - iwl_legacy_clear_driver_stations(priv); - - /* Unblock any waiting calls */ - wake_up_all(&priv->wait_command_queue); - - /* Wipe out the EXIT_PENDING status bit if we are not actually - * exiting the module */ - if (!exit_pending) - clear_bit(STATUS_EXIT_PENDING, &priv->status); - - /* stop and reset the on-board processor */ - iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /* tell the device to stop sending interrupts */ - spin_lock_irqsave(&priv->lock, flags); - iwl_legacy_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - iwl4965_synchronize_irq(priv); - - if (priv->mac80211_registered) - ieee80211_stop_queues(priv->hw); - - /* If we have not previously called iwl_init() then - * clear all bits but the RF Kill bit and return */ - if (!iwl_legacy_is_init(priv)) { - priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << - STATUS_RF_KILL_HW | - test_bit(STATUS_GEO_CONFIGURED, &priv->status) << - STATUS_GEO_CONFIGURED | - test_bit(STATUS_EXIT_PENDING, &priv->status) << - STATUS_EXIT_PENDING; - goto exit; - } - - /* ...otherwise clear out all the status bits but the RF Kill - * bit and continue taking the NIC down. */ - priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << - STATUS_RF_KILL_HW | - test_bit(STATUS_GEO_CONFIGURED, &priv->status) << - STATUS_GEO_CONFIGURED | - test_bit(STATUS_FW_ERROR, &priv->status) << - STATUS_FW_ERROR | - test_bit(STATUS_EXIT_PENDING, &priv->status) << - STATUS_EXIT_PENDING; - - iwl4965_txq_ctx_stop(priv); - iwl4965_rxq_stop(priv); - - /* Power-down device's busmaster DMA clocks */ - iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); - - /* Make sure (redundant) we've released our request to stay awake */ - iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* Stop the device, and put it in low power state */ - iwl_legacy_apm_stop(priv); - - exit: - memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); - - dev_kfree_skb(priv->beacon_skb); - priv->beacon_skb = NULL; - - /* clear out any free frames */ - iwl4965_clear_free_frames(priv); -} - -static void iwl4965_down(struct iwl_priv *priv) -{ - mutex_lock(&priv->mutex); - __iwl4965_down(priv); - mutex_unlock(&priv->mutex); - - iwl4965_cancel_deferred_work(priv); -} - -#define HW_READY_TIMEOUT (50) - -static int iwl4965_set_hw_ready(struct iwl_priv *priv) -{ - int ret = 0; - - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); - - /* See if we got it */ - ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - HW_READY_TIMEOUT); - if (ret != -ETIMEDOUT) - priv->hw_ready = true; - else - priv->hw_ready = false; - - IWL_DEBUG_INFO(priv, "hardware %s\n", - (priv->hw_ready == 1) ? 
"ready" : "not ready"); - return ret; -} - -static int iwl4965_prepare_card_hw(struct iwl_priv *priv) -{ - int ret = 0; - - IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n"); - - ret = iwl4965_set_hw_ready(priv); - if (priv->hw_ready) - return ret; - - /* If HW is not ready, prepare the conditions to check again */ - iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE); - - ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); - - /* HW should be ready by now, check again. */ - if (ret != -ETIMEDOUT) - iwl4965_set_hw_ready(priv); - - return ret; -} - -#define MAX_HW_RESTARTS 5 - -static int __iwl4965_up(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - int i; - int ret; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { - IWL_ERR(priv, "ucode not available for device bringup\n"); - return -EIO; - } - - for_each_context(priv, ctx) { - ret = iwl4965_alloc_bcast_station(priv, ctx); - if (ret) { - iwl_legacy_dealloc_bcast_stations(priv); - return ret; - } - } - - iwl4965_prepare_card_hw(priv); - - if (!priv->hw_ready) { - IWL_WARN(priv, "Exit HW not ready\n"); - return -EIO; - } - - /* If platform's RF_KILL switch is NOT set to KILL */ - if (iwl_read32(priv, - CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(STATUS_RF_KILL_HW, &priv->status); - else - set_bit(STATUS_RF_KILL_HW, &priv->status); - - if (iwl_legacy_is_rfkill(priv)) { - wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); - - iwl_legacy_enable_interrupts(priv); - IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); - return 0; - } - - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - - /* must be initialised before iwl_hw_nic_init */ - priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; - - ret = iwl4965_hw_nic_init(priv); - if (ret) { - IWL_ERR(priv, "Unable to init nic\n"); - return ret; - } - - /* make sure rfkill handshake bits are cleared */ - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - /* clear (again), then enable host interrupts */ - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_legacy_enable_interrupts(priv); - - /* really make sure rfkill handshake bits are cleared */ - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - - /* Copy original ucode data image from disk into backup cache. - * This will be used to initialize the on-board processor's - * data SRAM for a clean start when the runtime program first loads. 
*/ - memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, - priv->ucode_data.len); - - for (i = 0; i < MAX_HW_RESTARTS; i++) { - - /* load bootstrap state machine, - * load bootstrap program into processor's memory, - * prepare to load the "initialize" uCode */ - ret = priv->cfg->ops->lib->load_ucode(priv); - - if (ret) { - IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", - ret); - continue; - } - - /* start card; "initialize" will load runtime ucode */ - iwl4965_nic_start(priv); - - IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); - - return 0; - } - - set_bit(STATUS_EXIT_PENDING, &priv->status); - __iwl4965_down(priv); - clear_bit(STATUS_EXIT_PENDING, &priv->status); - - /* tried to restart and config the device for as long as our - * patience could withstand */ - IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); - return -EIO; -} - - -/***************************************************************************** - * - * Workqueue callbacks - * - *****************************************************************************/ - -static void iwl4965_bg_init_alive_start(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, init_alive_start.work); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - priv->cfg->ops->lib->init_alive_start(priv); -out: - mutex_unlock(&priv->mutex); -} - -static void iwl4965_bg_alive_start(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, alive_start.work); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - iwl4965_alive_start(priv); -out: - mutex_unlock(&priv->mutex); -} - -static void iwl4965_bg_run_time_calib_work(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - run_time_calib_work); - - mutex_lock(&priv->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || - test_bit(STATUS_SCANNING, &priv->status)) { - mutex_unlock(&priv->mutex); - return; - } - - if (priv->start_calib) { - iwl4965_chain_noise_calibration(priv, - (void *)&priv->_4965.statistics); - iwl4965_sensitivity_calibration(priv, - (void *)&priv->_4965.statistics); - } - - mutex_unlock(&priv->mutex); -} - -static void iwl4965_bg_restart(struct work_struct *data) -{ - struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { - struct iwl_rxon_context *ctx; - - mutex_lock(&priv->mutex); - for_each_context(priv, ctx) - ctx->vif = NULL; - priv->is_open = 0; - - __iwl4965_down(priv); - - mutex_unlock(&priv->mutex); - iwl4965_cancel_deferred_work(priv); - ieee80211_restart_hw(priv->hw); - } else { - iwl4965_down(priv); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - mutex_unlock(&priv->mutex); - return; - } - - __iwl4965_up(priv); - mutex_unlock(&priv->mutex); - } -} - -static void iwl4965_bg_rx_replenish(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, rx_replenish); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - mutex_lock(&priv->mutex); - iwl4965_rx_replenish(priv); - mutex_unlock(&priv->mutex); -} - -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - 
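Before moving on to the mac80211 entry points, note that the workqueue callbacks above all follow one skeleton: recover the driver state with container_of(), take priv->mutex, and return early when STATUS_EXIT_PENDING is set. A minimal sketch of that shape, using a hypothetical iwl4965_bg_example() handler (the 'restart' member stands in for whichever work_struct the handler was registered with via INIT_WORK()):

static void iwl4965_bg_example(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	mutex_lock(&priv->mutex);
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		goto out;

	/* the deferred work runs here, serialized against the mac80211
	 * callbacks and the down path by priv->mutex */
out:
	mutex_unlock(&priv->mutex);
}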
-#define UCODE_READY_TIMEOUT (4 * HZ) - -/* - * Not a mac80211 entry point function, but it fits in with all the - * other mac80211 functions grouped here. - */ -static int iwl4965_mac_setup_register(struct iwl_priv *priv, - u32 max_probe_length) -{ - int ret; - struct ieee80211_hw *hw = priv->hw; - struct iwl_rxon_context *ctx; - - hw->rate_control_algorithm = "iwl-4965-rs"; - - /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_NEED_DTIM_PERIOD | - IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; - - if (priv->cfg->sku & IWL_SKU_N) - hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; - - hw->sta_data_size = sizeof(struct iwl_station_priv); - hw->vif_data_size = sizeof(struct iwl_vif_priv); - - for_each_context(priv, ctx) { - hw->wiphy->interface_modes |= ctx->interface_modes; - hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; - } - - hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | - WIPHY_FLAG_DISABLE_BEACON_HINTS; - - /* - * For now, disable PS by default because it affects - * RX performance significantly. - */ - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; - /* we create the 802.11 header and a zero-length SSID element */ - hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; - - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; - - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - - if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->bands[IEEE80211_BAND_2GHZ]; - if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->bands[IEEE80211_BAND_5GHZ]; - - iwl_legacy_leds_init(priv); - - ret = ieee80211_register_hw(priv->hw); - if (ret) { - IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); - return ret; - } - priv->mac80211_registered = 1; - - return 0; -} - - -int iwl4965_mac_start(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&priv->mutex); - ret = __iwl4965_up(priv); - mutex_unlock(&priv->mutex); - - if (ret) - return ret; - - if (iwl_legacy_is_rfkill(priv)) - goto out; - - IWL_DEBUG_INFO(priv, "Start UP work done.\n"); - - /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from - * mac80211 will not be run successfully. 
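 * UCODE_READY_TIMEOUT is 4 * HZ, so the open path waits roughly four
 * seconds for STATUS_READY before giving up with -ETIMEDOUT.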
*/ - ret = wait_event_timeout(priv->wait_command_queue, - test_bit(STATUS_READY, &priv->status), - UCODE_READY_TIMEOUT); - if (!ret) { - if (!test_bit(STATUS_READY, &priv->status)) { - IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", - jiffies_to_msecs(UCODE_READY_TIMEOUT)); - return -ETIMEDOUT; - } - } - - iwl4965_led_enable(priv); - -out: - priv->is_open = 1; - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -void iwl4965_mac_stop(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!priv->is_open) - return; - - priv->is_open = 0; - - iwl4965_down(priv); - - flush_workqueue(priv->workqueue); - - /* User space software may expect getting rfkill changes - * even if interface is down */ - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_legacy_enable_rfkill_int(priv); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MACDUMP(priv, "enter\n"); - - IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - - if (iwl4965_tx_skb(priv, skb)) - dev_kfree_skb_any(skb); - - IWL_DEBUG_MACDUMP(priv, "leave\n"); -} - -void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta, - iv32, phase1key); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - int ret; - u8 sta_id; - bool is_default_wep_key = false; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (priv->cfg->mod_params->sw_crypto) { - IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta); - if (sta_id == IWL_INVALID_STATION) - return -EINVAL; - - mutex_lock(&priv->mutex); - iwl_legacy_scan_cancel_timeout(priv, 100); - - /* - * If we are getting WEP group key and we didn't receive any key mapping - * so far, we are in legacy wep mode (group key only), otherwise we are - * in 1X mode. - * In legacy wep mode, we use another host command to the uCode. 
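 * The switch below therefore routes legacy WEP group keys through
 * iwl4965_set_default_wep_key()/iwl4965_remove_default_wep_key(), and all
 * other keys through the dynamic-key variants keyed by sta_id.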
- */ - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && - !sta) { - if (cmd == SET_KEY) - is_default_wep_key = !ctx->key_mapping_keys; - else - is_default_wep_key = - (key->hw_key_idx == HW_KEY_DEFAULT); - } - - switch (cmd) { - case SET_KEY: - if (is_default_wep_key) - ret = iwl4965_set_default_wep_key(priv, - vif_priv->ctx, key); - else - ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx, - key, sta_id); - - IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (is_default_wep_key) - ret = iwl4965_remove_default_wep_key(priv, ctx, key); - else - ret = iwl4965_remove_dynamic_key(priv, ctx, - key, sta_id); - - IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) -{ - struct iwl_priv *priv = hw->priv; - int ret = -EINVAL; - - IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", - sta->addr, tid); - - if (!(priv->cfg->sku & IWL_SKU_N)) - return -EACCES; - - mutex_lock(&priv->mutex); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - IWL_DEBUG_HT(priv, "start Rx\n"); - ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn); - break; - case IEEE80211_AMPDU_RX_STOP: - IWL_DEBUG_HT(priv, "stop Rx\n"); - ret = iwl4965_sta_rx_agg_stop(priv, sta, tid); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - ret = 0; - break; - case IEEE80211_AMPDU_TX_START: - IWL_DEBUG_HT(priv, "start Tx\n"); - ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn); - break; - case IEEE80211_AMPDU_TX_STOP: - IWL_DEBUG_HT(priv, "stop Tx\n"); - ret = iwl4965_tx_agg_stop(priv, vif, sta, tid); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - ret = 0; - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = 0; - break; - } - mutex_unlock(&priv->mutex); - - return ret; -} - -int iwl4965_mac_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - int ret; - u8 sta_id; - - IWL_DEBUG_INFO(priv, "received request to add station %pM\n", - sta->addr); - mutex_lock(&priv->mutex); - IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", - sta->addr); - sta_priv->common.sta_id = IWL_INVALID_STATION; - - atomic_set(&sta_priv->pending_frames, 0); - - ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr, - is_ap, sta, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM (%d)\n", - sta->addr, ret); - /* Should we return success if return code is EEXIST ? 
*/ - mutex_unlock(&priv->mutex); - return ret; - } - - sta_priv->common.sta_id = sta_id; - - /* Initialize rate scaling */ - IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", - sta->addr); - iwl4965_rs_rate_init(priv, sta, sta_id); - mutex_unlock(&priv->mutex); - - return 0; -} - -void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch) -{ - struct iwl_priv *priv = hw->priv; - const struct iwl_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->channel; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u16 ch; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->mutex); - - if (iwl_legacy_is_rfkill(priv)) - goto out; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || - test_bit(STATUS_SCANNING, &priv->status) || - test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - goto out; - - if (!iwl_legacy_is_associated_ctx(ctx)) - goto out; - - if (!priv->cfg->ops->lib->set_channel_switch) - goto out; - - ch = channel->hw_value; - if (le16_to_cpu(ctx->active.channel) == ch) - goto out; - - ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); - if (!iwl_legacy_is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "invalid channel\n"); - goto out; - } - - spin_lock_irq(&priv->lock); - - priv->current_ht_config.smps = conf->smps_mode; - - /* Configure HT40 channels */ - ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else - ctx->ht.is_40mhz = false; - - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_legacy_set_rxon_channel(priv, channel, ctx); - iwl_legacy_set_rxon_ht(priv, ht_conf); - iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif); - - spin_unlock_irq(&priv->lock); - - iwl_legacy_set_rate(priv); - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); - priv->switch_channel = cpu_to_le16(ch); - if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) { - clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); - priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); - } - -out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -void iwl4965_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - struct iwl_rxon_context *ctx; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - 
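/*
 * Ignoring the do/while wrapper, each CHK() above is a set-or-clear
 * decision; for example
 * CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK) becomes:
 *
 *	if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
 *		filter_or |= RXON_FILTER_BCON_AWARE_MSK;
 *	else
 *		filter_nand |= RXON_FILTER_BCON_AWARE_MSK;
 *
 * The accumulated masks are then applied to every context's staging
 * filter_flags below.
 */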
mutex_lock(&priv->mutex); - - for_each_context(priv, ctx) { - ctx->staging.filter_flags &= ~filter_nand; - ctx->staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but we'll eventually commit the filter flags change anyway. - */ - } - - mutex_unlock(&priv->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_legacy_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. - */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - -/***************************************************************************** - * - * driver setup and teardown - * - *****************************************************************************/ - -static void iwl4965_bg_txpower_work(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - txpower_work); - - mutex_lock(&priv->mutex); - - /* If a scan happened to start before we got here - * then just return; the statistics notification will - * kick off another scheduled work to compensate for - * any temperature delta we missed here. */ - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || - test_bit(STATUS_SCANNING, &priv->status)) - goto out; - - /* Regardless of if we are associated, we must reconfigure the - * TX power since frames can be sent on non-radar channels while - * not associated */ - priv->cfg->ops->lib->send_tx_power(priv); - - /* Update last_temperature to keep is_calib_needed from running - * when it isn't needed... */ - priv->last_temperature = priv->temperature; -out: - mutex_unlock(&priv->mutex); -} - -static void iwl4965_setup_deferred_work(struct iwl_priv *priv) -{ - priv->workqueue = create_singlethread_workqueue(DRV_NAME); - - init_waitqueue_head(&priv->wait_command_queue); - - INIT_WORK(&priv->restart, iwl4965_bg_restart); - INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); - INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work); - INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); - INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); - - iwl_legacy_setup_scan_deferred_work(priv); - - INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); - - init_timer(&priv->statistics_periodic); - priv->statistics_periodic.data = (unsigned long)priv; - priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; - - init_timer(&priv->watchdog); - priv->watchdog.data = (unsigned long)priv; - priv->watchdog.function = iwl_legacy_bg_watchdog; - - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) - iwl4965_irq_tasklet, (unsigned long)priv); -} - -static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) -{ - cancel_work_sync(&priv->txpower_work); - cancel_delayed_work_sync(&priv->init_alive_start); - cancel_delayed_work(&priv->alive_start); - cancel_work_sync(&priv->run_time_calib_work); - - iwl_legacy_cancel_scan_deferred_work(priv); - - del_timer_sync(&priv->statistics_periodic); -} - -static void iwl4965_init_hw_rates(struct iwl_priv *priv, - struct ieee80211_rate *rates) -{ - int i; - - for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { - rates[i].bitrate = iwlegacy_rates[i].ieee * 5; - rates[i].hw_value = i; /* Rate scaling will work on indexes */ - rates[i].hw_value_short = i; - rates[i].flags = 0; - if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { - /* - * If CCK != 1M then set short preamble 
rate flag. - */ - rates[i].flags |= - (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ? - 0 : IEEE80211_RATE_SHORT_PREAMBLE; - } - } -} -/* - * Acquire priv->lock before calling this function ! - */ -void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) -{ - iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, - (index & 0xff) | (txq_id << 8)); - iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); -} - -void iwl4965_tx_queue_set_status(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry) -{ - int txq_id = txq->q.id; - - /* Find out whether to activate Tx queue */ - int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; - - /* Set up and activate */ - iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), - (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | - (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | - (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | - IWL49_SCD_QUEUE_STTS_REG_MSK); - - txq->sched_retry = scd_retry; - - IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", - active ? "Activate" : "Deactivate", - scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); -} - - -static int iwl4965_init_drv(struct iwl_priv *priv) -{ - int ret; - - spin_lock_init(&priv->sta_lock); - spin_lock_init(&priv->hcmd_lock); - - INIT_LIST_HEAD(&priv->free_frames); - - mutex_init(&priv->mutex); - - priv->ieee_channels = NULL; - priv->ieee_rates = NULL; - priv->band = IEEE80211_BAND_2GHZ; - - priv->iw_mode = NL80211_IFTYPE_STATION; - priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; - priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; - - /* initialize force reset */ - priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; - - /* Choose which receivers/antennas to use */ - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); - - iwl_legacy_init_scan_params(priv); - - ret = iwl_legacy_init_channel_map(priv); - if (ret) { - IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); - goto err; - } - - ret = iwl_legacy_init_geos(priv); - if (ret) { - IWL_ERR(priv, "initializing geos failed: %d\n", ret); - goto err_free_channel_map; - } - iwl4965_init_hw_rates(priv, priv->ieee_rates); - - return 0; - -err_free_channel_map: - iwl_legacy_free_channel_map(priv); -err: - return ret; -} - -static void iwl4965_uninit_drv(struct iwl_priv *priv) -{ - iwl4965_calib_free_results(priv); - iwl_legacy_free_geos(priv); - iwl_legacy_free_channel_map(priv); - kfree(priv->scan_cmd); -} - -static void iwl4965_hw_detect(struct iwl_priv *priv) -{ - priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV); - priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG); - priv->rev_id = priv->pci_dev->revision; - IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); -} - -static int iwl4965_set_hw_params(struct iwl_priv *priv) -{ - priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; - priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; - if (priv->cfg->mod_params->amsdu_size_8K) - priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); - else - priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); - - priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; - - if (priv->cfg->mod_params->disable_11n) - priv->cfg->sku &= ~IWL_SKU_N; - - /* Device-specific setup */ - return priv->cfg->ops->lib->set_hw_params(priv); -} - -static const u8 iwl4965_bss_ac_to_fifo[] = { - IWL_TX_FIFO_VO, - 
IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, -}; - -static const u8 iwl4965_bss_ac_to_queue[] = { - 0, 1, 2, 3, -}; - -static int -iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int err = 0, i; - struct iwl_priv *priv; - struct ieee80211_hw *hw; - struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); - unsigned long flags; - u16 pci_cmd; - - /************************ - * 1. Allocating HW data - ************************/ - - hw = iwl_legacy_alloc_all(cfg); - if (!hw) { - err = -ENOMEM; - goto out; - } - priv = hw->priv; - /* At this point both hw and priv are allocated. */ - - /* - * The default context is always valid, - * more may be discovered when firmware - * is loaded. - */ - priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); - - for (i = 0; i < NUM_IWL_RXON_CTX; i++) - priv->contexts[i].ctxid = i; - - priv->contexts[IWL_RXON_CTX_BSS].always_active = true; - priv->contexts[IWL_RXON_CTX_BSS].is_active = true; - priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; - priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; - priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; - priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; - priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; - priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; - priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo; - priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue; - priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = - BIT(NL80211_IFTYPE_ADHOC); - priv->contexts[IWL_RXON_CTX_BSS].interface_modes = - BIT(NL80211_IFTYPE_STATION); - priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; - priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; - priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; - priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1); - - SET_IEEE80211_DEV(hw, &pdev->dev); - - IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); - priv->cfg = cfg; - priv->pci_dev = pdev; - priv->inta_mask = CSR_INI_SET_MASK; - - if (iwl_legacy_alloc_traffic_mem(priv)) - IWL_ERR(priv, "Not enough memory to generate traffic log\n"); - - /************************** - * 2. Initializing PCI bus - **************************/ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - if (pci_enable_device(pdev)) { - err = -ENODEV; - goto out_ieee80211_free_hw; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); - /* both attempts failed: */ - if (err) { - IWL_WARN(priv, "No suitable DMA available.\n"); - goto out_pci_disable_device; - } - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) - goto out_pci_disable_device; - - pci_set_drvdata(pdev, priv); - - - /*********************** - * 3. 
Read REV register - ***********************/ - priv->hw_base = pci_iomap(pdev, 0, 0); - if (!priv->hw_base) { - err = -ENODEV; - goto out_pci_release_regions; - } - - IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", - (unsigned long long) pci_resource_len(pdev, 0)); - IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); - - /* these spin locks will be used in apm_ops.init and EEPROM access - * we should init now - */ - spin_lock_init(&priv->reg_lock); - spin_lock_init(&priv->lock); - - /* - * stop and reset the on-board processor just in case it is in a - * strange state ... like being left stranded by a primary kernel - * and this is now the kdump kernel trying to start up - */ - iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - iwl4965_hw_detect(priv); - IWL_INFO(priv, "Detected %s, REV=0x%X\n", - priv->cfg->name, priv->hw_rev); - - /* We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - - iwl4965_prepare_card_hw(priv); - if (!priv->hw_ready) { - IWL_WARN(priv, "Failed, HW not ready\n"); - goto out_iounmap; - } - - /***************** - * 4. Read EEPROM - *****************/ - /* Read the EEPROM */ - err = iwl_legacy_eeprom_init(priv); - if (err) { - IWL_ERR(priv, "Unable to init EEPROM\n"); - goto out_iounmap; - } - err = iwl4965_eeprom_check_version(priv); - if (err) - goto out_free_eeprom; - - if (err) - goto out_free_eeprom; - - /* extract MAC Address */ - iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr); - IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); - priv->hw->wiphy->addresses = priv->addresses; - priv->hw->wiphy->n_addresses = 1; - - /************************ - * 5. Setup HW constants - ************************/ - if (iwl4965_set_hw_params(priv)) { - IWL_ERR(priv, "failed to set hw parameters\n"); - goto out_free_eeprom; - } - - /******************* - * 6. Setup priv - *******************/ - - err = iwl4965_init_drv(priv); - if (err) - goto out_free_eeprom; - /* At this point both hw and priv are initialized. */ - - /******************** - * 7. Setup services - ********************/ - spin_lock_irqsave(&priv->lock, flags); - iwl_legacy_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - pci_enable_msi(priv->pci_dev); - - err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, - IRQF_SHARED, DRV_NAME, priv); - if (err) { - IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); - goto out_disable_msi; - } - - iwl4965_setup_deferred_work(priv); - iwl4965_setup_rx_handlers(priv); - - /********************************************* - * 8. 
Enable interrupts and read RFKILL state - *********************************************/ - - /* enable rfkill interrupt: hw bug w/a */ - pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { - pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); - } - - iwl_legacy_enable_rfkill_int(priv); - - /* If platform's RF_KILL switch is NOT set to KILL */ - if (iwl_read32(priv, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(STATUS_RF_KILL_HW, &priv->status); - else - set_bit(STATUS_RF_KILL_HW, &priv->status); - - wiphy_rfkill_set_hw_state(priv->hw->wiphy, - test_bit(STATUS_RF_KILL_HW, &priv->status)); - - iwl_legacy_power_initialize(priv); - - init_completion(&priv->_4965.firmware_loading_complete); - - err = iwl4965_request_firmware(priv, true); - if (err) - goto out_destroy_workqueue; - - return 0; - - out_destroy_workqueue: - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - free_irq(priv->pci_dev->irq, priv); - out_disable_msi: - pci_disable_msi(priv->pci_dev); - iwl4965_uninit_drv(priv); - out_free_eeprom: - iwl_legacy_eeprom_free(priv); - out_iounmap: - pci_iounmap(pdev, priv->hw_base); - out_pci_release_regions: - pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); - out_pci_disable_device: - pci_disable_device(pdev); - out_ieee80211_free_hw: - iwl_legacy_free_traffic_mem(priv); - ieee80211_free_hw(priv->hw); - out: - return err; -} - -static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) -{ - struct iwl_priv *priv = pci_get_drvdata(pdev); - unsigned long flags; - - if (!priv) - return; - - wait_for_completion(&priv->_4965.firmware_loading_complete); - - IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); - - iwl_legacy_dbgfs_unregister(priv); - sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); - - /* ieee80211_unregister_hw call wil cause iwl_mac_stop to - * to be called and iwl4965_down since we are removing the device - * we need to set STATUS_EXIT_PENDING bit. - */ - set_bit(STATUS_EXIT_PENDING, &priv->status); - - iwl_legacy_leds_exit(priv); - - if (priv->mac80211_registered) { - ieee80211_unregister_hw(priv->hw); - priv->mac80211_registered = 0; - } else { - iwl4965_down(priv); - } - - /* - * Make sure device is reset to low power before unloading driver. - * This may be redundant with iwl4965_down(), but there are paths to - * run iwl4965_down() without calling apm_ops.stop(), and there are - * paths to avoid running iwl4965_down() at all before leaving driver. - * This (inexpensive) call *makes sure* device is reset. - */ - iwl_legacy_apm_stop(priv); - - /* make sure we flush any pending irq or - * tasklet for the driver - */ - spin_lock_irqsave(&priv->lock, flags); - iwl_legacy_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - iwl4965_synchronize_irq(priv); - - iwl4965_dealloc_ucode_pci(priv); - - if (priv->rxq.bd) - iwl4965_rx_queue_free(priv, &priv->rxq); - iwl4965_hw_txq_ctx_free(priv); - - iwl_legacy_eeprom_free(priv); - - - /*netif_stop_queue(dev); */ - flush_workqueue(priv->workqueue); - - /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes - * priv->workqueue... so we can't take down the workqueue - * until now... 
*/ - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - iwl_legacy_free_traffic_mem(priv); - - free_irq(priv->pci_dev->irq, priv); - pci_disable_msi(priv->pci_dev); - pci_iounmap(pdev, priv->hw_base); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - iwl4965_uninit_drv(priv); - - dev_kfree_skb(priv->beacon_skb); - - ieee80211_free_hw(priv->hw); -} - -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - * must be called under priv->lock and mac access - */ -void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) -{ - iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask); -} - -/***************************************************************************** - * - * driver and module entry point - * - *****************************************************************************/ - -/* Hardware specific file defines the PCI IDs table for that hardware module */ -static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = { -#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965) - {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)}, - {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)}, -#endif /* CONFIG_IWL4965 */ - - {0} -}; -MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids); - -static struct pci_driver iwl4965_driver = { - .name = DRV_NAME, - .id_table = iwl4965_hw_card_ids, - .probe = iwl4965_pci_probe, - .remove = __devexit_p(iwl4965_pci_remove), - .driver.pm = IWL_LEGACY_PM_OPS, -}; - -static int __init iwl4965_init(void) -{ - - int ret; - pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); - pr_info(DRV_COPYRIGHT "\n"); - - ret = iwl4965_rate_control_register(); - if (ret) { - pr_err("Unable to register rate control algorithm: %d\n", ret); - return ret; - } - - ret = pci_register_driver(&iwl4965_driver); - if (ret) { - pr_err("Unable to initialize PCI module\n"); - goto error_register; - } - - return ret; - -error_register: - iwl4965_rate_control_unregister(); - return ret; -} - -static void __exit iwl4965_exit(void) -{ - pci_unregister_driver(&iwl4965_driver); - iwl4965_rate_control_unregister(); -} - -module_exit(iwl4965_exit); -module_init(iwl4965_init); - -#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG -module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif - -module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO); -MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); -module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO); -MODULE_PARM_DESC(queues_num, "number of hw queues."); -module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO); -MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); -module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, - int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); -module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO); -MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h index 30a493003ab..ffec4b4a248 100644 --- a/drivers/net/wireless/iwlegacy/iwl-prph.h +++ b/drivers/net/wireless/iwlegacy/prph.h @@ -60,8 +60,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ -#ifndef __iwl_legacy_prph_h__ -#define __iwl_legacy_prph_h__ +#ifndef __il_prph_h__ +#define __il_prph_h__ /* * Registers in this file are internal, not PCI bus memory mapped. @@ -91,9 +91,9 @@ #define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) #define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) #define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) -#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ +#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ #define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) -#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ +#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ #define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) @@ -120,13 +120,13 @@ * * 1) Initialization -- performs hardware calibration and sets up some * internal data, then notifies host via "initialize alive" notification - * (struct iwl_init_alive_resp) that it has completed all of its work. + * (struct il_init_alive_resp) that it has completed all of its work. * After signal from host, it then loads and starts the runtime program. * The initialization program must be used when initially setting up the * NIC after loading the driver. * * 2) Runtime/Protocol -- performs all normal runtime operations. This - * notifies host via "alive" notification (struct iwl_alive_resp) that it + * notifies host via "alive" notification (struct il_alive_resp) that it * is ready to be used. * * When initializing the NIC, the host driver does the following procedure: @@ -189,7 +189,7 @@ * procedure. * * This save/restore method is mostly for autonomous power management during - * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and + * normal operation (result of C_POWER_TBL). Platform suspend/resume and * RFKILL should use complete restarts (with total re-initialization) of uCode, * allowing total shutdown (including BSM memory). * @@ -202,19 +202,19 @@ */ /* BSM bit fields */ -#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ -#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ -#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ /* BSM addresses */ #define BSM_BASE (PRPH_BASE + 0x3400) #define BSM_END (PRPH_BASE + 0x3800) -#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ -#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ -#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ -#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ -#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ /* * Pointers and size regs for bootstrap load and data SRAM save/restore. 
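[Editorial aside] The BSM register descriptions just above (BSM_WR_CTRL_REG, BSM_WR_MEM_SRC_REG, BSM_WR_MEM_DST_REG, BSM_WR_DWCOUNT_REG, BSM_WR_STATUS_REG) are enough to sketch how a bootstrap image is copied into SRAM. The sketch below only illustrates the documented register semantics; the accessor names, struct name, poll budget and the caller are assumptions and are not part of this rename patch.

	/* Illustrative only: program a BSM copy using the registers described
	 * above, then poll BSM_WR_STATUS_REG bit 0 ("1 == done").
	 * il_wr_prph()/il_rd_prph() and struct il_priv are assumed names. */
	static int example_bsm_load(struct il_priv *il, u32 src, u32 dst, u32 count)
	{
		int i;

		il_wr_prph(il, BSM_WR_MEM_SRC_REG, src);	/* source in BSM mem */
		il_wr_prph(il, BSM_WR_MEM_DST_REG, dst);	/* destination in SRAM */
		il_wr_prph(il, BSM_WR_DWCOUNT_REG, count);	/* transfer length */

		/* kick off the copy now */
		il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

		for (i = 0; i < 100; i++) {
			if (il_rd_prph(il, BSM_WR_STATUS_REG) & 0x1)
				return 0;	/* done */
			udelay(10);
		}
		return -ETIMEDOUT;
	}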
@@ -231,8 +231,7 @@ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) */ #define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) -#define BSM_SRAM_SIZE (1024) /* bytes */ - +#define BSM_SRAM_SIZE (1024) /* bytes */ /* 3945 Tx scheduler registers */ #define ALM_SCD_BASE (PRPH_BASE + 0x2E00) @@ -255,7 +254,7 @@ * but one DMA channel may take input from several queues. * * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows - * (cf. default_queue_to_tx_fifo in iwl-4965.c): + * (cf. default_queue_to_tx_fifo in 4965.c): * * 0 -- EDCA BK (background) frames, lowest priority * 1 -- EDCA BE (best effort) frames, normal priority @@ -274,20 +273,20 @@ * The driver sets up each queue to work in one of two modes: * * 1) Scheduler-Ack, in which the scheduler automatically supports a - * block-ack (BA) window of up to 64 TFDs. In this mode, each queue + * block-ack (BA) win of up to 64 TFDs. In this mode, each queue * contains TFDs for a unique combination of Recipient Address (RA) * and Traffic Identifier (TID), that is, traffic of a given * Quality-Of-Service (QOS) priority, destined for a single station. * * In scheduler-ack mode, the scheduler keeps track of the Tx status of - * each frame within the BA window, including whether it's been transmitted, + * each frame within the BA win, including whether it's been transmitted, * and whether it's been acknowledged by the receiving station. The device * automatically processes block-acks received from the receiving STA, * and reschedules un-acked frames to be retransmitted (successful * Tx completion may end up being out-of-order). * * The driver must maintain the queue's Byte Count table in host DRAM - * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. + * (struct il4965_sched_queue_byte_cnt_tbl) for this mode. * This mode does not support fragmentation. * * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. @@ -316,34 +315,34 @@ */ /** - * Max Tx window size is the max number of contiguous TFDs that the scheduler + * Max Tx win size is the max number of contiguous TFDs that the scheduler * can keep track of at one time when creating block-ack chains of frames. * Note that "64" matches the number of ack bits in a block-ack packet. * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize - * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. + * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. */ #define SCD_WIN_SIZE 64 #define SCD_FRAME_LIMIT 64 /* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ -#define IWL49_SCD_START_OFFSET 0xa02c00 +#define IL49_SCD_START_OFFSET 0xa02c00 /* * 4965 tells driver SRAM address for internal scheduler structs via this reg. * Value is valid only after "Alive" response from uCode. */ -#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) +#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0) /* * Driver may need to update queue-empty bits after changing queue's - * write and read pointers (indexes) during (re-)initialization (i.e. when + * write and read pointers (idxes) during (re-)initialization (i.e. when * scheduler is not tracking what's happening). * Bit fields: * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty * NOTE: This register is not used by Linux driver. 
*/ -#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) +#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4) /* * Physical base address of array of byte count (BC) circular buffers (CBs). @@ -351,11 +350,11 @@ * This register points to BC CB for queue 0, must be on 1024-byte boundary. * Others are spaced by 1024 bytes. * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. - * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). + * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff). * Bit fields: * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. */ -#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) +#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10) /* * Enables any/all Tx DMA/FIFO channels. @@ -364,23 +363,23 @@ * Bit fields: * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 */ -#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) +#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c) /* - * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. + * Queue (x) Write Pointers (idxes, really!), one for each Tx queue. * Initialized and updated by driver as new TFDs are added to queue. - * NOTE: If using Block Ack, index must correspond to frame's - * Start Sequence Number; index = (SSN & 0xff) + * NOTE: If using Block Ack, idx must correspond to frame's + * Start Sequence Number; idx = (SSN & 0xff) * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? */ -#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) +#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4) /* - * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. - * For FIFO mode, index indicates next frame to transmit. - * For Scheduler-ACK mode, index indicates first frame in Tx window. + * Queue (x) Read Pointers (idxes, really!), one for each Tx queue. + * For FIFO mode, idx indicates next frame to transmit. + * For Scheduler-ACK mode, idx indicates first frame in Tx win. * Initialized by driver, updated by scheduler. */ -#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) +#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4) /* * Select which queues work in chain mode (1) vs. not (0). @@ -391,18 +390,18 @@ * NOTE: If driver sets up queue for chain mode, it should be also set up * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). */ -#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) +#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0) /* * Select which queues interrupt driver when scheduler increments - * a queue's read pointer (index). + * a queue's read pointer (idx). * Bit fields: * 31-16: Reserved * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled * NOTE: This functionality is apparently a no-op; driver relies on interrupts * from Rx queue to read Tx command responses and update Tx queues. */ -#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) +#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4) /* * Queue search status registers. One for each queue. @@ -414,7 +413,7 @@ * Driver should init to "1" for aggregation mode, or "0" otherwise. * 7-6: Driver should init to "0" * 5: Window Size Left; indicates whether scheduler can request - * another TFD, based on window size, etc. Driver should init + * another TFD, based on win size, etc. 
Driver should init * this bit to "1" for aggregation mode, or "0" for non-agg. * 4-1: Tx FIFO to use (range 0-7). * 0: Queue is active (1), not active (0). @@ -423,18 +422,18 @@ * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled * via SCD_QUEUECHAIN_SEL. */ -#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ - (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) +#define IL49_SCD_QUEUE_STATUS_BITS(x)\ + (IL49_SCD_START_OFFSET + 0x104 + (x) * 4) /* Bit field positions */ -#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) -#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) -#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) -#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) +#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1) +#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5) +#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) /* Write masks */ -#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) -#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) +#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) /** * 4965 internal SRAM structures for scheduler, shared with driver ... @@ -460,7 +459,7 @@ * each queue's entry as follows: * * LS Dword bit fields: - * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. + * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64. * * MS Dword bit fields: * 16-22: Frame limit. Driver should init to 10 (0xa). @@ -470,14 +469,14 @@ * Init must be done after driver receives "Alive" response from 4965 uCode, * and when setting up queue for aggregation. */ -#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 -#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ - (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) +#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380 +#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ + (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) -#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) -#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) -#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) -#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) +#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) /* * Tx Status Bitmap @@ -486,7 +485,7 @@ * "Alive" notification from uCode. Area is used only by device itself; * no other support (besides clearing) is required from driver. */ -#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 +#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 /* * RAxTID to queue translation mapping. @@ -494,7 +493,7 @@ * When queue is in Scheduler-ACK mode, frames placed in a that queue must be * for only one combination of receiver address (RA) and traffic ID (TID), i.e. * one QOS priority level destined for one station (for this wireless link, - * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit + * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK * mode, the device ignores the mapping value. * @@ -508,16 +507,16 @@ * must read a dword-aligned value from device SRAM, replace the 16-bit map * value of interest, and write the dword value back into device SRAM. 
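[Editorial aside] The dword-aligned read/modify/write described here maps directly onto the IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE() macro defined just below. A minimal sketch follows; the target-memory accessors, the scd_base_addr argument and the struct name are assumptions for illustration, only the macros come from the patch.

	/* Sketch of the read/modify/write described above: replace one queue's
	 * 16-bit RAxTID map without disturbing its neighbour in the same dword.
	 * Accessor names and the base-address argument are assumptions. */
	static void example_set_q2ratid(struct il_priv *il, u32 scd_base_addr,
					u16 txq_id, u8 sta_id, u8 tid)
	{
		u32 tbl_dw_addr = scd_base_addr +
				  IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
		u16 map = ((sta_id << IL_SCD_TXFIFO_POS_RA) |
			   (tid << IL_SCD_TXFIFO_POS_TID)) &
			  IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
		u32 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);

		if (txq_id & 0x1)	/* odd queues live in the upper 16 bits */
			tbl_dw = (map << 16) | (tbl_dw & 0x0000FFFF);
		else			/* even queues live in the lower 16 bits */
			tbl_dw = map | (tbl_dw & 0xFFFF0000);

		il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
	}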
*/ -#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 +#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500 /* Find translation table dword to read/write for given queue */ -#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ - ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) +#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) -#define IWL_SCD_TXFIFO_POS_TID (0) -#define IWL_SCD_TXFIFO_POS_RA (4) -#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) +#define IL_SCD_TXFIFO_POS_TID (0) +#define IL_SCD_TXFIFO_POS_RA (4) +#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) /*********************** END TX SCHEDULER *************************************/ -#endif /* __iwl_legacy_prph_h__ */ +#endif /* __il_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 57703d5209d..ae08498dfca 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING occur. endmenu -config IWLWIFI_DEVICE_SVTOOL - bool "iwlwifi device svtool support" +config IWLWIFI_DEVICE_TESTMODE + def_bool y depends on IWLWIFI - select NL80211_TESTMODE + depends on NL80211_TESTMODE help - This option enables the svtool support for iwlwifi device through - NL80211_TESTMODE. svtool is a software validation tool that runs in - the user space and interacts with the device in the kernel space - through the generic netlink message via NL80211_TESTMODE channel. + This option enables the testmode support for iwlwifi device through + NL80211_TESTMODE. This provide the capabilities of enable user space + validation applications to interacts with the device through the + generic netlink message via NL80211_TESTMODE channel. + +config IWLWIFI_P2P + bool "iwlwifi experimental P2P support" + depends on IWLWIFI + help + This option enables experimental P2P support for some devices + based on microcode support. Since P2P support is still under + development, this option may even enable it for some devices + now that turn out to not support it in the future due to + microcode restrictions. + + To determine if your microcode supports the experimental P2P + offered by this option, check if the driver advertises AP + support when it is loaded. + + Say Y only if you want to experiment with P2P. 
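[Editorial aside] The Kconfig change above replaces the old svtool option with IWLWIFI_DEVICE_TESTMODE, and later hunks in this series gate testmode-only paths on the new symbol. A minimal sketch of that gating, assuming only priv->tm_fixed_rate from the patch; the helper itself is illustrative:

	/* Testmode-only code compiles in only when the new
	 * CONFIG_IWLWIFI_DEVICE_TESTMODE symbol is set (it replaces the old
	 * CONFIG_IWLWIFI_DEVICE_SVTOOL used elsewhere in this series). */
	#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
	static bool example_testmode_overrides_rate(struct iwl_priv *priv)
	{
		/* user-space validation tools may pin a fixed TX rate */
		return priv->tm_fixed_rate != 0;
	}
	#else
	static inline bool example_testmode_overrides_rate(struct iwl_priv *priv)
	{
		return false;
	}
	#endif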
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index c73e5ed8db5..9dc84a7354d 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -1,7 +1,7 @@ # WIFI obj-$(CONFIG_IWLWIFI) += iwlwifi.o -iwlwifi-objs := iwl-agn.o iwl-agn-rs.o -iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o +iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o +iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o @@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o -iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o +iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index dd008b0e641..1ef7bfc2ab2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -124,10 +124,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - priv->cfg->base_params->num_of_queues = + cfg(priv)->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; + hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE; @@ -135,28 +135,19 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ); - hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - if (priv->cfg->rx_with_siso_diversity) + hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); + if (cfg(priv)->rx_with_siso_diversity) hw_params(priv).rx_chains_num = 1; else hw_params(priv).rx_chains_num = - num_of_ant(priv->cfg->valid_rx_ant); - hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; - hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; + num_of_ant(cfg(priv)->valid_rx_ant); + hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; + hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; iwl1000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ - /* Set initial calibration set */ hw_params(priv).sens = &iwl1000_sensitivity; - hw_params(priv).calib_init_cfg = - BIT(IWL_CALIB_XTAL) | - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_TX_IQ_PERD) | - BIT(IWL_CALIB_BASE_BAND); - if (priv->cfg->need_dc_calib) - hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC); return 0; } diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 79431977a96..094693328db 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -86,7 +86,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv) { iwl_rf_config(priv); - if (priv->cfg->iq_invert) + if (cfg(priv)->iq_invert) iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); } @@ -120,10 +120,10 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - 
priv->cfg->base_params->num_of_queues = + cfg(priv)->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; + hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE; @@ -131,29 +131,19 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ); - hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - if (priv->cfg->rx_with_siso_diversity) + hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); + if (cfg(priv)->rx_with_siso_diversity) hw_params(priv).rx_chains_num = 1; else hw_params(priv).rx_chains_num = - num_of_ant(priv->cfg->valid_rx_ant); - hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; - hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; + num_of_ant(cfg(priv)->valid_rx_ant); + hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; + hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; iwl2000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ - /* Set initial calibration set */ hw_params(priv).sens = &iwl2000_sensitivity; - hw_params(priv).calib_init_cfg = - BIT(IWL_CALIB_XTAL) | - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_BASE_BAND); - if (priv->cfg->need_dc_calib) - hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; - if (priv->cfg->need_temp_offset_calib) - hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); return 0; } @@ -258,25 +248,19 @@ static struct iwl_bt_params iwl2030_bt_params = { .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ - .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ .iq_invert = true \ struct iwl_cfg iwl2000_2bgn_cfg = { - .name = "2000 Series 2x2 BGN", + .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", IWL_DEVICE_2000, .ht_params = &iwl2000_ht_params, }; -struct iwl_cfg iwl2000_2bg_cfg = { - .name = "2000 Series 2x2 BG", - IWL_DEVICE_2000, -}; - struct iwl_cfg iwl2000_2bgn_d_cfg = { - .name = "2000D Series 2x2 BGN", + .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN", IWL_DEVICE_2000, .ht_params = &iwl2000_ht_params, }; @@ -291,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = { .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ - .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ @@ -299,16 +282,11 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = { .iq_invert = true \ struct iwl_cfg iwl2030_2bgn_cfg = { - .name = "2000 Series 2x2 BGN/BT", + .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", IWL_DEVICE_2030, .ht_params = &iwl2000_ht_params, }; -struct iwl_cfg iwl2030_2bg_cfg = { - .name = "2000 Series 2x2 BG/BT", - IWL_DEVICE_2030, -}; - #define IWL_DEVICE_105 \ .fw_name_pre = IWL105_FW_PRE, \ .ucode_api_max = IWL105_UCODE_API_MAX, \ @@ -318,7 +296,6 @@ struct iwl_cfg iwl2030_2bg_cfg = { .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ - .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ @@ -326,19 +303,14 @@ struct iwl_cfg iwl2030_2bg_cfg = { .rx_with_siso_diversity = true, \ .iq_invert = true \ -struct iwl_cfg iwl105_bg_cfg = { - 
.name = "105 Series 1x1 BG", - IWL_DEVICE_105, -}; - struct iwl_cfg iwl105_bgn_cfg = { - .name = "105 Series 1x1 BGN", + .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", IWL_DEVICE_105, .ht_params = &iwl2000_ht_params, }; struct iwl_cfg iwl105_bgn_d_cfg = { - .name = "105D Series 1x1 BGN", + .name = "Intel(R) Centrino(R) Wireless-N 105D BGN", IWL_DEVICE_105, .ht_params = &iwl2000_ht_params, }; @@ -353,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = { .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ - .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ @@ -361,13 +332,8 @@ struct iwl_cfg iwl105_bgn_d_cfg = { .rx_with_siso_diversity = true, \ .iq_invert = true \ -struct iwl_cfg iwl135_bg_cfg = { - .name = "135 Series 1x1 BG/BT", - IWL_DEVICE_135, -}; - struct iwl_cfg iwl135_bgn_cfg = { - .name = "135 Series 1x1 BGN/BT", + .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", IWL_DEVICE_135, .ht_params = &iwl2000_ht_params, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index f55fb2d1af5..b3a365fea7b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = { #define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) -static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) +static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd) { u16 temperature, voltage; - __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv, + __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd, EEPROM_KELVIN_TEMPERATURE); temperature = le16_to_cpu(temp_calib[0]); @@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv) { const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - - iwl_temp_calib_to_offset(priv); + iwl_temp_calib_to_offset(priv->shrd); hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef; } @@ -166,10 +166,10 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - priv->cfg->base_params->num_of_queues = + cfg(priv)->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; + hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE; @@ -178,22 +178,15 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); - hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; - hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; + hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); + hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant); + hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; + hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; iwl5000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ - /* Set initial calibration set */ hw_params(priv).sens = &iwl5000_sensitivity; - hw_params(priv).calib_init_cfg = 
- BIT(IWL_CALIB_XTAL) | - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_TX_IQ_PERD) | - BIT(IWL_CALIB_BASE_BAND); return 0; } @@ -202,10 +195,10 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - priv->cfg->base_params->num_of_queues = + cfg(priv)->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; + hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE; @@ -214,22 +207,15 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); - hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; - hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; + hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); + hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant); + hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; + hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; iwl5150_set_ct_threshold(priv); /* Set initial sensitivity parameters */ - /* Set initial calibration set */ hw_params(priv).sens = &iwl5150_sensitivity; - hw_params(priv).calib_init_cfg = - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_BASE_BAND); - if (priv->cfg->need_dc_calib) - hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC); return 0; } @@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) static void iwl5150_temperature(struct iwl_priv *priv) { u32 vt = 0; - s32 offset = iwl_temp_calib_to_offset(priv); + s32 offset = iwl_temp_calib_to_offset(priv->shrd); vt = le32_to_cpu(priv->statistics.common.temperature); vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; @@ -434,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = { .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ .lib = &iwl5150_lib, \ .base_params = &iwl5000_base_params, \ - .need_dc_calib = true, \ + .no_xtal_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index c840c78278d..54b753399e6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -46,11 +46,12 @@ #include "iwl-cfg.h" /* Highest firmware API version supported */ -#define IWL6000_UCODE_API_MAX 4 +#define IWL6000_UCODE_API_MAX 6 #define IWL6050_UCODE_API_MAX 5 #define IWL6000G2_UCODE_API_MAX 6 /* Oldest version we won't warn about */ +#define IWL6000_UCODE_API_OK 4 #define IWL6000G2_UCODE_API_OK 5 /* Lowest firmware API version supported */ @@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv) static void iwl6050_additional_nic_config(struct iwl_priv *priv) { /* Indicate calibration version to uCode. 
*/ - if (iwlagn_eeprom_calib_version(priv) >= 6) + if (iwl_eeprom_calib_version(priv->shrd) >= 6) iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); } @@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv) static void iwl6150_additional_nic_config(struct iwl_priv *priv) { /* Indicate calibration version to uCode. */ - if (iwlagn_eeprom_calib_version(priv) >= 6) + if (iwl_eeprom_calib_version(priv->shrd) >= 6) iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, @@ -101,14 +102,14 @@ static void iwl6000_nic_config(struct iwl_priv *priv) iwl_rf_config(priv); /* no locking required for register write */ - if (priv->cfg->pa_type == IWL_PA_INTERNAL) { + if (cfg(priv)->pa_type == IWL_PA_INTERNAL) { /* 2x2 IPA phy type */ iwl_write32(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); } /* do additional nic configuration if needed */ - if (priv->cfg->additional_nic_config) - priv->cfg->additional_nic_config(priv); + if (cfg(priv)->additional_nic_config) + cfg(priv)->additional_nic_config(priv); } static struct iwl_sensitivity_ranges iwl6000_sensitivity = { @@ -140,10 +141,10 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - priv->cfg->base_params->num_of_queues = + cfg(priv)->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; + hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE; @@ -152,29 +153,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - if (priv->cfg->rx_with_siso_diversity) + hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); + if (cfg(priv)->rx_with_siso_diversity) hw_params(priv).rx_chains_num = 1; else hw_params(priv).rx_chains_num = - num_of_ant(priv->cfg->valid_rx_ant); - hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; - hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; + num_of_ant(cfg(priv)->valid_rx_ant); + hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; + hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; iwl6000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ - /* Set initial calibration set */ hw_params(priv).sens = &iwl6000_sensitivity; - hw_params(priv).calib_init_cfg = - BIT(IWL_CALIB_XTAL) | - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_BASE_BAND); - if (priv->cfg->need_dc_calib) - hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; - if (priv->cfg->need_temp_offset_calib) - hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); return 0; } @@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = { .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .lib = &iwl6000_lib, \ .base_params = &iwl6000_g2_base_params, \ - .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE @@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = { .lib = &iwl6030_lib, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ - .need_dc_calib = true, \ .need_temp_offset_calib = true, \ 
.led_mode = IWL_LED_RF_STATE, \ .adv_pm = true \ @@ -434,21 +423,11 @@ struct iwl_cfg iwl6030_2bg_cfg = { }; struct iwl_cfg iwl6035_2agn_cfg = { - .name = "6035 Series 2x2 AGN/BT", + .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", IWL_DEVICE_6030, .ht_params = &iwl6000_ht_params, }; -struct iwl_cfg iwl6035_2abg_cfg = { - .name = "6035 Series 2x2 ABG/BT", - IWL_DEVICE_6030, -}; - -struct iwl_cfg iwl6035_2bg_cfg = { - .name = "6035 Series 2x2 BG/BT", - IWL_DEVICE_6030, -}; - struct iwl_cfg iwl1030_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", IWL_DEVICE_6030, @@ -479,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = { #define IWL_DEVICE_6000i \ .fw_name_pre = IWL6000_FW_PRE, \ .ucode_api_max = IWL6000_UCODE_API_MAX, \ + .ucode_api_ok = IWL6000_UCODE_API_OK, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ @@ -516,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = { .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ - .need_dc_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true @@ -540,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = { .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ - .need_dc_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true @@ -559,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = { .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", .fw_name_pre = IWL6000_FW_PRE, .ucode_api_max = IWL6000_UCODE_API_MAX, + .ucode_api_ok = IWL6000_UCODE_API_OK, .ucode_api_min = IWL6000_UCODE_API_MIN, .eeprom_ver = EEPROM_6000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, .lib = &iwl6000_lib, .base_params = &iwl6000_base_params, .ht_params = &iwl6000_ht_params, - .need_dc_calib = true, .led_mode = IWL_LED_BLINK, }; -MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c index 03bac48558b..50ff849c9f6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -82,56 +82,64 @@ struct statistics_general_data { u32 beacon_energy_c; }; -int iwl_send_calib_results(struct iwl_priv *priv) +int iwl_send_calib_results(struct iwl_trans *trans) { - int ret = 0; - int i = 0; - struct iwl_host_cmd hcmd = { .id = REPLY_PHY_CALIBRATION_CMD, .flags = CMD_SYNC, }; - - for (i = 0; i < IWL_CALIB_MAX; i++) { - if ((BIT(i) & hw_params(priv).calib_init_cfg) && - priv->calib_results[i].buf) { - hcmd.len[0] = priv->calib_results[i].buf_len; - hcmd.data[0] = priv->calib_results[i].buf; - hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - ret = iwl_trans_send_cmd(trans(priv), &hcmd); - if (ret) { - IWL_ERR(priv, "Error %d iteration %d\n", - ret, i); - break; - } + struct iwl_calib_result *res; + + list_for_each_entry(res, &trans->calib_results, list) { + int ret; + + hcmd.len[0] = res->cmd_len; + hcmd.data[0] = &res->hdr; + hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) { + IWL_ERR(trans, "Error %d on calib cmd %d\n", + ret, 
res->hdr.op_code); + return ret; } } - return ret; + return 0; } -int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) +int iwl_calib_set(struct iwl_trans *trans, + const struct iwl_calib_hdr *cmd, int len) { - if (res->buf_len != len) { - kfree(res->buf); - res->buf = kzalloc(len, GFP_ATOMIC); - } - if (unlikely(res->buf == NULL)) + struct iwl_calib_result *res, *tmp; + + res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr), + GFP_ATOMIC); + if (!res) return -ENOMEM; + memcpy(&res->hdr, cmd, len); + res->cmd_len = len; + + list_for_each_entry(tmp, &trans->calib_results, list) { + if (tmp->hdr.op_code == res->hdr.op_code) { + list_replace(&tmp->list, &res->list); + kfree(tmp); + return 0; + } + } + + /* wasn't in list already */ + list_add_tail(&res->list, &trans->calib_results); - res->buf_len = len; - memcpy(res->buf, buf, len); return 0; } -void iwl_calib_free_results(struct iwl_priv *priv) +void iwl_calib_free_results(struct iwl_trans *trans) { - int i; + struct iwl_calib_result *res, *tmp; - for (i = 0; i < IWL_CALIB_MAX; i++) { - kfree(priv->calib_results[i].buf); - priv->calib_results[i].buf = NULL; - priv->calib_results[i].buf_len = 0; + list_for_each_entry_safe(res, tmp, &trans->calib_results, list) { + list_del(&res->list); + kfree(res); } } @@ -505,7 +513,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); - if (priv->cfg->base_params->hd_v2) { + if (cfg(priv)->base_params->hd_v2) { cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = @@ -839,7 +847,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, * connect the first valid tx chain */ first_chain = - find_first_chain(priv->cfg->valid_tx_ant); + find_first_chain(cfg(priv)->valid_tx_ant); data->disconn_array[first_chain] = 0; active_chains |= BIT(first_chain); IWL_DEBUG_CALIB(priv, @@ -882,7 +890,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, continue; } - delta_g = (priv->cfg->base_params->chain_noise_scale * + delta_g = (cfg(priv)->base_params->chain_noise_scale * ((s32)average_noise[default_chain] - (s32)average_noise[i])) / 1500; @@ -1039,8 +1047,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) return; /* Analyze signal for disconnected antenna */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) { /* Disable disconnected antenna algorithm for advanced bt coex, assuming valid antennas are connected */ data->active_chains = hw_params(priv).valid_rx_ant; @@ -1074,7 +1082,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) iwlagn_gain_computation(priv, average_noise, min_average_noise_antenna_i, min_average_noise, - find_first_chain(priv->cfg->valid_rx_ant)); + find_first_chain(cfg(priv)->valid_rx_ant)); /* Some power changes may have been made during the calibration. 
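[Editorial aside] The calibration rework a few hunks above moves results from a fixed per-index buffer array to a list on the transport, keyed by the calibration header's op_code. A minimal usage sketch; the callers and packet plumbing are assumptions, only iwl_calib_set() and iwl_send_calib_results() come from the patch:

	/* Sketch only: store one calibration result reported by the uCode.
	 * The list keeps a single entry per op_code; a newer result replaces
	 * the older one, so results can arrive in any order. */
	static int example_store_calib_result(struct iwl_trans *trans,
					      struct iwl_rx_packet *pkt, int len)
	{
		const struct iwl_calib_hdr *hdr = (const void *)pkt->u.raw;

		return iwl_calib_set(trans, hdr, len);
	}

	/* After the runtime uCode reports "alive", replay every stored result
	 * back to it in one pass over trans->calib_results. */
	static int example_replay_calib_results(struct iwl_trans *trans)
	{
		return iwl_send_calib_results(trans);
	}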
* Update and commit the RXON diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h index a869fc9205d..10275ce92bd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h @@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv); void iwl_init_sensitivity(struct iwl_priv *priv); void iwl_reset_run_time_calib(struct iwl_priv *priv); -int iwl_send_calib_results(struct iwl_priv *priv); -int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); -void iwl_calib_free_results(struct iwl_priv *priv); - #endif /* __iwl_calib_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 1a52ed29f2d..64cf439035c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -32,6 +32,7 @@ #include <linux/init.h> #include <linux/sched.h> +#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" @@ -92,11 +93,11 @@ void iwlagn_temperature(struct iwl_priv *priv) iwl_tt_handler(priv); } -u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) +u16 iwl_eeprom_calib_version(struct iwl_shared *shrd) { struct iwl_eeprom_calib_hdr *hdr; - hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd, EEPROM_CALIB_ALL); return hdr->version; @@ -105,7 +106,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) /* * EEPROM */ -static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) +static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address) { u16 offset = 0; @@ -114,31 +115,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) switch (address & INDIRECT_TYPE_MSK) { case INDIRECT_HOST: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST); break; case INDIRECT_GENERAL: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL); break; case INDIRECT_REGULATORY: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY); break; case INDIRECT_TXP_LIMIT: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT); break; case INDIRECT_TXP_LIMIT_SIZE: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE); break; case INDIRECT_CALIBRATION: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION); break; case INDIRECT_PROCESS_ADJST: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST); break; case INDIRECT_OTHERS: - offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS); + offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS); break; default: - IWL_ERR(priv, "illegal indirect type: 0x%X\n", + IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n", address & INDIRECT_TYPE_MSK); break; } @@ -147,11 +148,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) return (address & ADDRESS_MSK) + (offset << 1); } -const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) +const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset) { - 
u32 address = eeprom_indirect_address(priv, offset); - BUG_ON(address >= priv->cfg->base_params->eeprom_size); - return &priv->eeprom[address]; + u32 address = eeprom_indirect_address(shrd, offset); + BUG_ON(address >= shrd->cfg->base_params->eeprom_size); + return &shrd->eeprom[address]; } struct iwl_mod_params iwlagn_mod_params = { @@ -232,7 +233,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | IWL_PAN_SCD_MULTICAST_MSK; - if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) + if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", @@ -374,15 +375,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != sizeof(basic.bt3_lookup_table)); - if (priv->cfg->bt_params) { - if (priv->cfg->bt_params->bt_session_2) { + if (cfg(priv)->bt_params) { + if (cfg(priv)->bt_params->bt_session_2) { bt_cmd_2000.prio_boost = cpu_to_le32( - priv->cfg->bt_params->bt_prio_boost); + cfg(priv)->bt_params->bt_prio_boost); bt_cmd_2000.tx_prio_boost = 0; bt_cmd_2000.rx_prio_boost = 0; } else { bt_cmd_6000.prio_boost = - priv->cfg->bt_params->bt_prio_boost; + cfg(priv)->bt_params->bt_prio_boost; bt_cmd_6000.tx_prio_boost = 0; bt_cmd_6000.rx_prio_boost = 0; } @@ -430,7 +431,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) priv->bt_full_concurrent ? "full concurrency" : "3-wire"); - if (priv->cfg->bt_params->bt_session_2) { + if (cfg(priv)->bt_params->bt_session_2) { memcpy(&bt_cmd_2000.basic, &basic, sizeof(basic)); ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG, @@ -799,8 +800,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv) */ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) { - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -827,6 +828,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) case IEEE80211_SMPS_STATIC: case IEEE80211_SMPS_DYNAMIC: return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_OFF: return active_cnt; default: @@ -870,8 +872,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) else active_chains = hw_params(priv).valid_rx_ant; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -933,53 +935,359 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) return ant; } -/* notification wait support */ -void iwlagn_init_notification_wait(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - u8 cmd, - void (*fn)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt, - void *data), - void *fn_data) +#ifdef CONFIG_PM_SLEEP +static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) { - wait_entry->fn = fn; - wait_entry->fn_data = fn_data; - wait_entry->cmd = cmd; - wait_entry->triggered = false; - wait_entry->aborted = false; - - spin_lock_bh(&priv->notif_wait_lock); - list_add(&wait_entry->list, &priv->notif_waits); - spin_unlock_bh(&priv->notif_wait_lock); + int i; + + for (i = 0; i < IWLAGN_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); } -int 
iwlagn_wait_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - unsigned long timeout) +struct wowlan_key_data { + struct iwl_rxon_context *ctx; + struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwlagn_wowlan_tkip_params_cmd *tkip; + const u8 *bssid; + bool error, use_rsc_tsc, use_tkip; +}; + + +static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) { - int ret; + struct iwl_priv *priv = hw->priv; + struct wowlan_key_data *data = _data; + struct iwl_rxon_context *ctx = data->ctx; + struct aes_sc *aes_sc, *aes_tx_sc = NULL; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct iwlagn_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWLAGN_P1K_SIZE]; + int ret, i; + + mutex_lock(&priv->shrd->mutex); - ret = wait_event_timeout(priv->notif_waitq, - wait_entry->triggered || wait_entry->aborted, - timeout); + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta && !ctx->key_mapping_keys) + ret = iwl_set_default_wep_key(priv, ctx, key); + else + ret = iwl_set_dynamic_key(priv, ctx, key, sta); - spin_lock_bh(&priv->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(&priv->notif_wait_lock); + if (ret) { + IWL_ERR(priv, "Error setting key during suspend!\n"); + data->error = true; + } - if (wait_entry->aborted) - return -EIO; + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; - /* return value is always >= 0 */ - if (ret <= 0) - return -ETIMEDOUT; - return 0; + rx_p1ks = data->tkip->rx_uni; + + ieee80211_get_key_tx_seq(key, &seq); + tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + + ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); + + memcpy(data->tkip->mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + rx_mic_key = data->tkip->mic_keys.rx_unicast; + } else { + tkip_sc = + data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + rx_p1ks = data->tkip->rx_multi; + rx_mic_key = data->tkip->mic_keys.rx_mcast; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. 
+ */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, data->bssid, + cur_rx_iv32 + 1, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); + + memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + data->use_tkip = true; + data->use_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (sta) { + u8 *pn = seq.ccmp.pn; + + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + + ieee80211_get_key_tx_seq(key, &seq); + aes_tx_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } else + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 for checking the IV in the frames. + */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + data->use_rsc_tsc = true; + break; + } + + mutex_unlock(&priv->shrd->mutex); +} + +int iwlagn_send_patterns(struct iwl_priv *priv, + struct cfg80211_wowlan *wowlan) +{ + struct iwlagn_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = REPLY_WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .flags = CMD_SYNC, + }; + int i, err; + + if (!wowlan->n_patterns) + return 0; + + cmd.len[0] = sizeof(*pattern_cmd) + + wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); + + pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; + + pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); + + for (i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + + memcpy(&pattern_cmd->patterns[i].mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].mask_size = mask_len; + pattern_cmd->patterns[i].pattern_size = + wowlan->patterns[i].pattern_len; + } + + cmd.data[0] = pattern_cmd; + err = iwl_trans_send_cmd(trans(priv), &cmd); + kfree(pattern_cmd); + return err; } -void iwlagn_remove_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry) +int iwlagn_suspend(struct iwl_priv *priv, + struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { - spin_lock_bh(&priv->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(&priv->notif_wait_lock); + struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; + struct iwl_rxon_cmd rxon; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; + struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; + struct iwlagn_d3_config_cmd d3_cfg_cmd = {}; + struct wowlan_key_data key_data = { + .ctx = ctx, + .bssid = ctx->active.bssid_addr, + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret, i; + u16 seq; + 
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) + return -ENOMEM; + + memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); + + /* + * We know the last used seqno, and the uCode expects to know that + * one, it will increment before TX. + */ + seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; + wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); + + /* + * For QoS counters, we store the one to use next, so subtract 0x10 + * since the uCode will add 0x10 before using the value. + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + seq = priv->tid_data[IWL_AP_ID][i].seq_number; + seq -= 0x10; + wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); + } + + if (wowlan->disconnect) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->n_patterns) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); + + if (wowlan->rfkill_release) + d3_cfg_cmd.wakeup_flags |= + cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL); + + iwl_scan_cancel_timeout(priv, 200); + + memcpy(&rxon, &ctx->active, sizeof(rxon)); + + iwl_trans_stop_device(trans(priv)); + + priv->shrd->wowlan = true; + + ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN); + if (ret) + goto out; + + /* now configure WoWLAN ucode */ + ret = iwl_alive_start(priv); + if (ret) + goto out; + + memcpy(&ctx->staging, &rxon, sizeof(rxon)); + ret = iwlagn_commit_rxon(priv, ctx); + if (ret) + goto out; + + ret = iwl_power_update_mode(priv, true); + if (ret) + goto out; + + if (!iwlagn_mod_params.sw_crypto) { + /* mark all keys clear */ + priv->ucode_key_table = 0; + ctx->key_mapping_keys = 0; + + /* + * This needs to be unlocked due to lock ordering + * constraints. Since we're in the suspend path + * that isn't really a problem though. 
+ */ + mutex_unlock(&priv->shrd->mutex); + ieee80211_iter_keys(priv->hw, ctx->vif, + iwlagn_wowlan_program_keys, + &key_data); + mutex_lock(&priv->shrd->mutex); + if (key_data.error) { + ret = -EIO; + goto out; + } + + if (key_data.use_rsc_tsc) { + struct iwl_host_cmd rsc_tsc_cmd = { + .id = REPLY_WOWLAN_TSC_RSC_PARAMS, + .flags = CMD_SYNC, + .data[0] = key_data.rsc_tsc, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(key_data.rsc_tsc), + }; + + ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd); + if (ret) + goto out; + } + + if (key_data.use_tkip) { + ret = iwl_trans_send_cmd_pdu(trans(priv), + REPLY_WOWLAN_TKIP_PARAMS, + CMD_SYNC, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto out; + } + + if (priv->have_rekey_data) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = priv->replay_ctr; + + ret = iwl_trans_send_cmd_pdu(trans(priv), + REPLY_WOWLAN_KEK_KCK_MATERIAL, + CMD_SYNC, sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto out; + } + } + + ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC, + sizeof(d3_cfg_cmd), &d3_cfg_cmd); + if (ret) + goto out; + + ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER, + CMD_SYNC, sizeof(wakeup_filter_cmd), + &wakeup_filter_cmd); + if (ret) + goto out; + + ret = iwlagn_send_patterns(priv, wowlan); + out: + kfree(key_data.rsc_tsc); + return ret; } +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 66118cea2af..334b5ae8fdd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, } else return IWL_MAX_TID_COUNT; - if (unlikely(tid >= TID_MAX_LOAD_COUNT)) + if (unlikely(tid >= IWL_MAX_TID_COUNT)) return IWL_MAX_TID_COUNT; tl = &lq_data->load[tid]; @@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv, lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ -#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE /* testmode has higher priority to overwirte the fixed rate */ if (priv->tm_fixed_rate) lq_sta->dbg_fixed_rate = priv->tm_fixed_rate; @@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) s32 index; struct iwl_traffic_load *tl = NULL; - if (tid >= TID_MAX_LOAD_COUNT) + if (tid >= IWL_MAX_TID_COUNT) return 0; tl = &(lq_data->load[tid]); @@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, struct iwl_lq_sta *lq_data, struct ieee80211_sta *sta) { - if (tid < TID_MAX_LOAD_COUNT) + if (tid < IWL_MAX_TID_COUNT) rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); else - IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", - tid, TID_MAX_LOAD_COUNT); + IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n", + tid, IWL_MAX_TID_COUNT); } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) @@ -1081,12 +1081,12 @@ done: if (sta && sta->supp_rates[sband->band]) rs_rate_scale_perform(priv, skb, sta, lq_sta); -#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL) +#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE) if ((priv->tm_fixed_rate) && 
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) + if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); } @@ -1458,10 +1458,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv, break; case IWL_BT_COEX_TRAFFIC_LOAD_LOW: /* avoid antenna B unless MIMO */ - valid_tx_ant = - first_antenna(hw_params(priv).valid_tx_ant); if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2) - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + tbl->action = IWL_LEGACY_SWITCH_SISO; break; case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: @@ -1636,10 +1634,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, break; case IWL_BT_COEX_TRAFFIC_LOAD_LOW: /* avoid antenna B unless MIMO */ - valid_tx_ant = - first_antenna(hw_params(priv).valid_tx_ant); if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; + tbl->action = IWL_SISO_SWITCH_MIMO2_AB; break; case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: @@ -2277,7 +2273,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, tid = rs_tl_add_packet(lq_sta, hdr); if ((tid != IWL_MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { - tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid]; + tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid]; if (tid_data->agg.state == IWL_AGG_OFF) lq_sta->is_agg = 0; else @@ -2649,8 +2645,7 @@ lq_update: (lq_sta->tx_agg_tid_en & (1 << tid)) && (tid != IWL_MAX_TID_COUNT)) { u8 sta_id = lq_sta->lq.sta_id; - tid_data = - &priv->shrd->tid_data[sta_id][tid]; + tid_data = &priv->tid_data[sta_id][tid]; if (tid_data->agg.state == IWL_AGG_OFF) { IWL_DEBUG_RATE(priv, "try to aggregate tid %d\n", @@ -2908,7 +2903,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i if (sband->band == IEEE80211_BAND_5GHZ) lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; -#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE priv->tm_fixed_rate = 0; #endif #ifdef CONFIG_MAC80211_DEBUGFS @@ -3059,11 +3054,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && priv->cfg->bt_params && - priv->cfg->bt_params->agg_time_limit && + if (priv && cfg(priv)->bt_params && + cfg(priv)->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(priv->cfg->bt_params->agg_time_limit); + cpu_to_le16(cfg(priv)->bt_params->agg_time_limit); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h index f4f6deb829a..6675b3c816d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h @@ -281,7 +281,6 @@ enum { #define TID_QUEUE_CELL_SPACING 50 /*mS */ #define TID_QUEUE_MAX_SIZE 20 #define TID_ROUND_VALUE 5 /* mS */ -#define TID_MAX_LOAD_COUNT 8 #define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) #define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) @@ -402,7 +401,7 @@ struct iwl_lq_sta { struct iwl_link_quality_cmd lq; struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; + struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; u8 tx_agg_tid_en; #ifdef CONFIG_MAC80211_DEBUGFS struct dentry *rs_sta_dbgfs_scale_table_file; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c index 5af9e6258a1..b22b2976f89 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c @@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd) IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); IWL_CMD(REPLY_WOWLAN_GET_STATUS); + IWL_CMD(REPLY_D3_CONFIG); default: return "UNKNOWN"; @@ -317,7 +318,7 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv, unsigned int msecs) { int delta; - int threshold = priv->cfg->base_params->plcp_delta_threshold; + int threshold = cfg(priv)->base_params->plcp_delta_threshold; if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); @@ -582,8 +583,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, iwlagn_rx_calc_noise(priv); queue_work(priv->shrd->workqueue, &priv->run_time_calib_work); } - if (priv->cfg->lib->temperature && change) - priv->cfg->lib->temperature(priv); + if (cfg(priv)->lib->temperature && change) + cfg(priv)->lib->temperature(priv); return 0; } @@ -800,7 +801,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, ctx->active.bssid_addr)) continue; ctx->last_tx_rejected = false; - iwl_trans_wake_any_queue(trans(priv), ctx->ctxid); + iwl_trans_wake_any_queue(trans(priv), ctx->ctxid, + "channel got active"); } } @@ -1032,6 +1034,50 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, return 0; } +static int iwlagn_rx_noa_notification(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_wipan_noa_data *new_data, *old_data; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw; + + /* no condition -- we're in softirq */ + old_data = rcu_dereference_protected(priv->noa_data, true); + + if (noa_notif->noa_active) { + u32 len = le16_to_cpu(noa_notif->noa_attribute.length); + u32 copylen = len; + + /* EID, len, OUI, subtype */ + len += 1 + 1 + 3 + 1; + /* P2P id, P2P length */ + len += 1 + 2; + copylen += 1 + 2; + + new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC); + if (new_data) { + new_data->length = len; + new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC; + new_data->data[1] = len - 2; /* not counting EID, len */ + new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff; + new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff; + new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff; + new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P; + memcpy(&new_data->data[6], &noa_notif->noa_attribute, + copylen); + } + } else + new_data = NULL; + + rcu_assign_pointer(priv->noa_data, new_data); + + if (old_data) + kfree_rcu(old_data, rcu_head); + + return 0; +} + /** * iwl_setup_rx_handlers - Initialize Rx handler callbacks * @@ -1055,6 +1101,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif; handlers[REPLY_ADD_STA] = iwl_add_sta_callback; + handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification; + /* * The same handler is used for both the REPLY to a discrete * statistics request from the host as 
well as for the periodic @@ -1083,13 +1131,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; /* set up notification wait support */ - spin_lock_init(&priv->notif_wait_lock); - INIT_LIST_HEAD(&priv->notif_waits); - init_waitqueue_head(&priv->notif_waitq); + spin_lock_init(&priv->shrd->notif_wait_lock); + INIT_LIST_HEAD(&priv->shrd->notif_waits); + init_waitqueue_head(&priv->shrd->notif_waitq); /* Set up BT Rx handlers */ - if (priv->cfg->lib->bt_rx_handler_setup) - priv->cfg->lib->bt_rx_handler_setup(priv); + if (cfg(priv)->lib->bt_rx_handler_setup) + cfg(priv)->lib->bt_rx_handler_setup(priv); } @@ -1104,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, * even if the RX handler consumes the RXB we have * access to it in the notification wait entry. */ - if (!list_empty(&priv->notif_waits)) { + if (!list_empty(&priv->shrd->notif_waits)) { struct iwl_notification_wait *w; - spin_lock(&priv->notif_wait_lock); - list_for_each_entry(w, &priv->notif_waits, list) { + spin_lock(&priv->shrd->notif_wait_lock); + list_for_each_entry(w, &priv->shrd->notif_waits, list) { if (w->cmd != pkt->hdr.cmd) continue; IWL_DEBUG_RX(priv, @@ -1117,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, pkt->hdr.cmd); w->triggered = true; if (w->fn) - w->fn(priv, pkt, w->fn_data); + w->fn(trans(priv), pkt, w->fn_data); } - spin_unlock(&priv->notif_wait_lock); + spin_unlock(&priv->shrd->notif_wait_lock); - wake_up_all(&priv->notif_waitq); + wake_up_all(&priv->shrd->notif_waitq); } if (priv->pre_rx_handler) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 5c7c17c7166..1c665941662 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -45,7 +45,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv, send->filter_flags = old_filter; if (ret) - IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret); + IWL_DEBUG_QUIET_RFKILL(priv, + "Error clearing ASSOC_MSK on BSS (%d)\n", ret); return ret; } @@ -59,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, u8 old_dev_type = send->dev_type; int ret; - iwlagn_init_notification_wait(priv, &disable_wait, + iwl_init_notification_wait(priv->shrd, &disable_wait, REPLY_WIPAN_DEACTIVATION_COMPLETE, NULL, NULL); @@ -73,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, if (ret) { IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); - iwlagn_remove_notification(priv, &disable_wait); + iwl_remove_notification(priv->shrd, &disable_wait); } else { - ret = iwlagn_wait_notification(priv, &disable_wait, HZ); + ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ); if (ret) IWL_ERR(priv, "Timed out waiting for PAN disable\n"); } @@ -116,7 +117,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv, if (ctx->ht.enabled) ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; - IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags); @@ -124,7 +125,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv, sizeof(struct iwl_qosparam_cmd), &ctx->qos_data.def_qos_parm); if (ret) - IWL_ERR(priv, "Failed to update QoS\n"); + IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); } static int iwlagn_update_beacon(struct iwl_priv *priv, @@ -295,9 +296,9 @@ static int 
iwlagn_rxon_connect(struct iwl_priv *priv, } if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && - priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) + cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode) ieee80211_request_smps(ctx->vif, - priv->cfg->ht_params->smps_mode); + cfg(priv)->ht_params->smps_mode); return 0; } @@ -444,8 +445,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) * force CTS-to-self frames protection if RTS-CTS is not preferred * one aggregation protection method */ - if (!(priv->cfg->ht_params && - priv->cfg->ht_params->use_rts_for_aggregation)) + if (!(cfg(priv)->ht_params && + cfg(priv)->ht_params->use_rts_for_aggregation)) ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || @@ -559,6 +560,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&priv->shrd->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) + goto out; + if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) { IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); goto out; @@ -850,7 +854,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, if (ctx->last_tx_rejected) { ctx->last_tx_rejected = false; iwl_trans_wake_any_queue(trans(priv), - ctx->ctxid); + ctx->ctxid, + "Disassoc: flush queue"); } ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index 4b2aa1da095..7353826095f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -130,25 +130,15 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, return iwl_process_add_sta_resp(priv, addsta, pkt); } -static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) -{ - u16 size = (u16)sizeof(struct iwl_addsta_cmd); - struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; - memcpy(addsta, cmd, size); - /* resrved in 5000 */ - addsta->rate_n_flags = cpu_to_le16(0); - return size; -} - int iwl_send_add_sta(struct iwl_priv *priv, struct iwl_addsta_cmd *sta, u8 flags) { int ret = 0; - u8 data[sizeof(*sta)]; struct iwl_host_cmd cmd = { .id = REPLY_ADD_STA, .flags = flags, - .data = { data, }, + .data = { sta, }, + .len = { sizeof(*sta), }, }; u8 sta_id __maybe_unused = sta->sta.sta_id; @@ -160,7 +150,6 @@ int iwl_send_add_sta(struct iwl_priv *priv, might_sleep(); } - cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data); ret = iwl_trans_send_cmd(trans(priv), &cmd); if (ret || (flags & CMD_ASYNC)) @@ -463,6 +452,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, const u8 *addr) { unsigned long flags; + u8 tid; if (!iwl_is_ready(priv->shrd)) { IWL_DEBUG_INFO(priv, @@ -501,6 +491,10 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, priv->stations[sta_id].lq = NULL; } + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + memset(&priv->tid_data[sta_id][tid], 0, + sizeof(priv->tid_data[sta_id][tid])); + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; priv->num_stations--; @@ -647,7 +641,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) int ret; struct iwl_addsta_cmd sta_cmd; struct iwl_link_quality_cmd lq; - bool active; + bool active, have_lq = false; spin_lock_irqsave(&priv->shrd->sta_lock, flags); if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { @@ -657,7 +651,10 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); sta_cmd.mode = 0; - memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq)); + if (priv->stations[sta_id].lq) { + memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq)); + have_lq = true; + } active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE; priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; @@ -679,7 +676,8 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) if (ret) IWL_ERR(priv, "failed to re-add STA %pM (%d)\n", priv->stations[sta_id].sta.sta.addr, ret); - iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); + if (have_lq) + iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); } int iwl_get_free_ucode_key_offset(struct iwl_priv *priv) @@ -825,28 +823,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, return ret; } -int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter: received request to remove " - "station %pM\n", sta->addr); - mutex_lock(&priv->shrd->mutex); - IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", - sta->addr); - ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); - if (ret) - IWL_ERR(priv, "Error removing station %pM\n", - sta->addr); - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id, struct iwl_link_quality_cmd *link_cmd) @@ -1459,20 +1435,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } -static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) -{ - unsigned long flags; - spin_lock_irqsave(&priv->shrd->sta_lock, flags); - priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; - priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; - priv->stations[sta_id].sta.sta.modify_mask = 0; - priv->stations[sta_id].sta.sleep_tx_count = 0; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); - -} void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) { @@ -1489,36 +1452,3 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); } - -void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - int sta_id; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - switch (cmd) { - case STA_NOTIFY_SLEEP: - WARN_ON(!sta_priv->client); - sta_priv->asleep = true; - if (atomic_read(&sta_priv->pending_frames) > 0) - ieee80211_sta_block_awake(hw, sta, true); - break; - case STA_NOTIFY_AWAKE: - WARN_ON(!sta_priv->client); - if (!sta_priv->asleep) - break; - sta_priv->asleep = false; - sta_id = iwl_sta_id(sta); - if (sta_id != IWL_INVALID_STATION) - iwl_sta_modify_ps_wake(priv, sta_id); - break; - default: - break; - } - IWL_DEBUG_MAC80211(priv, "leave\n"); -} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c index c27180a7335..b0dff7a753a 100644 --- 
a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c @@ -633,7 +633,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); - if (priv->cfg->base_params->adv_thermal_throttle) { + if (cfg(priv)->base_params->adv_thermal_throttle) { IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); tt->restriction = kcalloc(IWL_TI_STATE_MAX, sizeof(struct iwl_tt_restriction), diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index df1540ca610..c664c272655 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -74,8 +74,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; else if (info->band == IEEE80211_BAND_2GHZ && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc) || skb->protocol == cpu_to_be16(ETH_P_PAE))) @@ -151,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, if (ieee80211_is_data(fc)) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; -#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE if (priv->tm_fixed_rate) { /* * rate overwrite by testmode @@ -164,7 +164,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, } #endif return; - } + } else if (ieee80211_is_back_req(fc)) + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; /** * If the current TX rate stored in mac80211 has the MCS bit set, it's @@ -190,8 +191,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_flags |= RATE_MCS_CCK_MSK; /* Set up antennas */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, @@ -261,8 +262,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) __le16 fc; u8 hdr_len; - u16 len; - u8 sta_id; + u16 len, seq_number = 0; + u8 sta_id, tid = IWL_MAX_TID_COUNT; unsigned long flags; bool is_agg = false; @@ -286,6 +287,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); #endif + if (unlikely(ieee80211_is_probe_resp(fc))) { + struct iwl_wipan_noa_data *noa_data = + rcu_dereference(priv->noa_data); + + if (noa_data && + pskb_expand_head(skb, 0, noa_data->length, + GFP_ATOMIC) == 0) { + memcpy(skb_put(skb, noa_data->length), + noa_data->data, noa_data->length); + hdr = (struct ieee80211_hdr *)skb->data; + } + } + hdr_len = ieee80211_hdrlen(fc); /* For management frames use broadcast id to do not break aggregation */ @@ -354,9 +368,51 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) info->driver_data[0] = ctx; info->driver_data[1] = dev_cmd; - if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id)) + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { + u8 *qc = NULL; + struct iwl_tid_data *tid_data; + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + goto drop_unlock_sta; + tid_data = 
&priv->tid_data[sta_id][tid]; + + /* aggregation is on for this <sta,tid> */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + tid_data->agg.state != IWL_AGG_ON) { + IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:" + " Tx flags = 0x%08x, agg.state = %d", + info->flags, tid_data->agg.state); + IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d", + sta_id, tid, SEQ_TO_SN(tid_data->seq_number)); + goto drop_unlock_sta; + } + + /* We can receive packets from the stack in IWL_AGG_{ON,OFF} + * only. Check this here. + */ + if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && + tid_data->agg.state != IWL_AGG_OFF, + "Tx while agg.state = %d", tid_data->agg.state)) + goto drop_unlock_sta; + + seq_number = tid_data->seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + } + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid)) goto drop_unlock_sta; + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) && + !ieee80211_has_morefrags(fc)) + priv->tid_data[sta_id][tid].seq_number = seq_number; + spin_unlock(&priv->shrd->sta_lock); spin_unlock_irqrestore(&priv->shrd->lock, flags); @@ -381,10 +437,81 @@ drop_unlock_priv: return -1; } +int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct iwl_tid_data *tid_data; + unsigned long flags; + int sta_id; + + sta_id = iwl_sta_id(sta); + + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->shrd->sta_lock, flags); + + tid_data = &priv->tid_data[sta_id][tid]; + + switch (priv->tid_data[sta_id][tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. 
+ */ + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + goto turn_off; + case IWL_AGG_ON: + break; + default: + IWL_WARN(priv, "Stopping AGG while state not ON " + "or starting for %d on %d (%d)\n", sta_id, tid, + priv->tid_data[sta_id][tid].agg.state); + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + return 0; + } + + tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); + + /* There are still packets for this RA / TID in the HW */ + if (tid_data->agg.ssn != tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " + "next_recl = %d", + tid_data->agg.ssn, + tid_data->next_reclaimed); + priv->tid_data[sta_id][tid].agg.state = + IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + return 0; + } + + IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", + tid_data->agg.ssn); +turn_off: + priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; + + /* do not restore/save irqs */ + spin_unlock(&priv->shrd->sta_lock); + spin_lock(&priv->shrd->lock); + + iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); + + spin_unlock_irqrestore(&priv->shrd->lock, flags); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + return 0; +} + int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_tid_data *tid_data; + unsigned long flags; int sta_id; int ret; @@ -399,7 +526,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, if (unlikely(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; - if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { + if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); return -ENXIO; } @@ -408,27 +535,136 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, if (ret) return ret; - ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id, - tid, ssn); + spin_lock_irqsave(&priv->shrd->sta_lock, flags); + + tid_data = &priv->tid_data[sta_id][tid]; + tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); + + *ssn = tid_data->agg.ssn; + + ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid); + if (ret) { + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + return ret; + } + + if (*ssn == tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", + tid_data->agg.ssn); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " + "next_reclaimed = %d", + tid_data->agg.ssn, + tid_data->next_reclaimed); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return ret; } -int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) +int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size) { - int sta_id; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + unsigned long flags; + u16 ssn; - sta_id = iwl_sta_id(sta); + buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); - return -ENXIO; + 
spin_lock_irqsave(&priv->shrd->sta_lock, flags); + ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + + iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid, + buf_size, ssn); + + /* + * If the limit is 0, then it wasn't initialised yet, + * use the default. We can do that since we take the + * minimum below, and we don't want to go above our + * default due to hardware restrictions. + */ + if (sta_priv->max_agg_bufsize == 0) + sta_priv->max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_DEF; + + /* + * Even though in theory the peer could have different + * aggregation reorder buffer sizes for different sessions, + * our ucode doesn't allow for that and has a global limit + * for each station. Therefore, use the minimum of all the + * aggregation sessions and our default value. + */ + sta_priv->max_agg_bufsize = + min(sta_priv->max_agg_bufsize, buf_size); + + if (cfg(priv)->ht_params && + cfg(priv)->ht_params->use_rts_for_aggregation) { + /* + * switch to RTS/CTS if it is the prefer protection + * method for HT traffic + */ + + sta_priv->lq_sta.lq.general_params.flags |= + LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; } + priv->agg_tids_count++; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); + + sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = + sta_priv->max_agg_bufsize; - return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid, - sta_id, tid); + IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + + return iwl_send_lq_cmd(priv, ctx, + &sta_priv->lq_sta.lq, CMD_ASYNC, false); +} + +static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) +{ + struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid]; + enum iwl_rxon_context_id ctx; + struct ieee80211_vif *vif; + u8 *addr; + + lockdep_assert_held(&priv->shrd->sta_lock); + + addr = priv->stations[sta_id].sta.sta.addr; + ctx = priv->stations[sta_id].ctxid; + vif = priv->contexts[ctx].vif; + + switch (priv->tid_data[sta_id][tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* There are no packets for this RA / TID in the HW any more */ + if (tid_data->agg.ssn == tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, + "Can continue DELBA flow ssn = next_recl =" + " %d", tid_data->next_reclaimed); + iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); + tid_data->agg.state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* There are no packets for this RA / TID in the HW any more */ + if (tid_data->agg.ssn == tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, + "Can continue ADDBA flow ssn = next_recl =" + " %d", tid_data->next_reclaimed); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); + } + break; + default: + break; + } } static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, @@ -568,7 +804,7 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, IWLAGN_TX_RES_TID_POS; int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> IWLAGN_TX_RES_RA_POS; - struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg; + struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg; u32 status = le16_to_cpu(tx_resp->status.status); int i; @@ -584,8 +820,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, * notification again. 
*/ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) { IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); } @@ -758,7 +994,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; struct ieee80211_hdr *hdr; u32 status = le16_to_cpu(tx_resp->status.status); - u32 ssn = iwlagn_get_scd_ssn(tx_resp); + u16 ssn = iwlagn_get_scd_ssn(tx_resp); int tid; int sta_id; int freed; @@ -780,10 +1016,34 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, iwl_rx_reply_tx_agg(priv, tx_resp); if (tx_resp->frame_count == 1) { + u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); + next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); + + if (is_agg) { + /* If this is an aggregation queue, we can rely on the + * ssn since the wifi sequence number corresponds to + * the index in the TFD ring (%256). + * The seq_ctl is the sequence control of the packet + * to which this Tx response relates. But if there is a + * hole in the bitmap of the BA we received, this Tx + * response may allow to reclaim the hole and all the + * subsequent packets that were already acked. + * In that case, seq_ctl != ssn, and the next packet + * to be reclaimed will be ssn and not seq_ctl. + */ + next_reclaimed = ssn; + } + __skb_queue_head_init(&skbs); + priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed; + + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", + next_reclaimed); + /*we can free until ssn % q.n_bd not inclusive */ - iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, - ssn, status, &skbs); + WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, + ssn, status, &skbs)); + iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); @@ -803,7 +1063,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, iwl_is_associated_ctx(ctx) && ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { ctx->last_tx_rejected = true; - iwl_trans_stop_queue(trans(priv), txq_id); + iwl_trans_stop_queue(trans(priv), txq_id, + "Tx on passive channel"); IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " @@ -877,27 +1138,24 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, sta_id = ba_resp->sta_id; tid = ba_resp->tid; - agg = &priv->shrd->tid_data[sta_id][tid].agg; + agg = &priv->tid_data[sta_id][tid].agg; spin_lock_irqsave(&priv->shrd->sta_lock, flags); - if (unlikely(agg->txq_id != scd_flow)) { - /* - * FIXME: this is a uCode bug which need to be addressed, - * log the information and return for now! - * since it is possible happen very often and in order - * not to fill the syslog, don't enable the logging by default - */ - IWL_DEBUG_TX_REPLY(priv, - "BA scd_flow %d does not match txq_id %d\n", - scd_flow, agg->txq_id); + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IWL_ERR(priv, "Received BA when not expected\n"); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; } - if (unlikely(!agg->wait_for_ba)) { - if (unlikely(ba_resp->bitmap)) - IWL_ERR(priv, "Received BA when not expected\n"); + __skb_queue_head_init(&reclaimed_skbs); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). 
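The reclaim bookkeeping in the reply-TX hunk above comes down to one small calculation, sketched stand-alone below (not part of the patch). It assumes SEQ_TO_SN keeps its usual iwlwifi meaning of ((seq & 0xfff0) >> 4); the example_ names are invented for illustration.

/* Editorial sketch: picking the next frame index to reclaim after a TX response. */
#include <stdint.h>

#define EXAMPLE_SCTL_SEQ 0xfff0u
#define EXAMPLE_SEQ_TO_SN(seq) ((uint16_t)(((seq) & EXAMPLE_SCTL_SEQ) >> 4))

/* On an aggregation queue, use the scheduler SSN from the response:
 * reclaiming the reported frame may also free later frames that were
 * already acked, so the next index can be ahead of seq_ctl + 1.
 * Otherwise the next frame to reclaim is simply the one after the
 * frame this response refers to. */
static uint16_t example_next_reclaimed(uint16_t seq_ctl, uint16_t scd_ssn,
				       int is_agg)
{
	if (is_agg)
		return scd_ssn;
	return EXAMPLE_SEQ_TO_SN(seq_ctl + 0x10);
}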
*/ + if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, + ba_resp_scd_ssn, 0, &reclaimed_skbs)) { spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; } @@ -909,11 +1167,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, ba_resp->sta_id); IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " "scd_flow = %d, scd_ssn = %d\n", - ba_resp->tid, - ba_resp->seq_ctl, + ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl), (unsigned long long)le64_to_cpu(ba_resp->bitmap), - ba_resp->scd_flow, - ba_resp->scd_ssn); + scd_flow, ba_resp_scd_ssn); /* Mark that the expected block-ack response arrived */ agg->wait_for_ba = false; @@ -932,13 +1188,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n", ba_resp->txed, ba_resp->txed_2_done); - __skb_queue_head_init(&reclaimed_skbs); + priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn; - /* Release all TFDs before the SSN, i.e. all TFDs in front of - * block-ack window (we assume that they've been successfully - * transmitted ... if not, it's too late anyway). */ - iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn, - 0, &reclaimed_skbs); + iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; while (!skb_queue_empty(&reclaimed_skbs)) { diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index e0e9a3dfbc0..b5c7c5f0a75 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -30,7 +30,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> @@ -44,6 +43,7 @@ #include <asm/div64.h> #include "iwl-eeprom.h" +#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" @@ -367,7 +367,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) u32 num_wraps; /* # times uCode wrapped to top of log */ u32 next_entry; /* index of next entry to be written by uCode */ - base = priv->device_pointers.error_event_table; + base = priv->shrd->device_pointers.error_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { capacity = iwl_read_targ_mem(bus(priv), base); num_wraps = iwl_read_targ_mem(bus(priv), @@ -452,52 +452,6 @@ static void iwl_bg_tx_flush(struct work_struct *work) iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); } -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc) -{ - if (desc->v_addr) - dma_free_coherent(bus(priv)->dev, desc->len, - desc->v_addr, desc->p_addr); - desc->v_addr = NULL; - desc->len = 0; -} - -static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img) -{ - iwl_free_fw_desc(priv, &img->code); - iwl_free_fw_desc(priv, &img->data); -} - -static void iwl_dealloc_ucode(struct iwl_priv *priv) -{ - iwl_free_fw_img(priv, &priv->ucode_rt); - iwl_free_fw_img(priv, &priv->ucode_init); - iwl_free_fw_img(priv, &priv->ucode_wowlan); -} - -static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc, - const void *data, size_t len) -{ - if (!len) { - desc->v_addr = NULL; - return -EINVAL; - } - - desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len, - &desc->p_addr, GFP_KERNEL); - if (!desc->v_addr) - return -ENOMEM; - - desc->len = len; - 
memcpy(desc->v_addr, data, len); - return 0; -} - static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) { int i; @@ -555,23 +509,14 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); } - -struct iwlagn_ucode_capabilities { - u32 max_probe_length; - u32 standard_phy_calibration_size; - u32 flags; -}; - static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); -static int iwlagn_mac_setup_register(struct iwl_priv *priv, - struct iwlagn_ucode_capabilities *capa); #define UCODE_EXPERIMENTAL_INDEX 100 #define UCODE_EXPERIMENTAL_TAG "exp" static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) { - const char *name_pre = priv->cfg->fw_name_pre; + const char *name_pre = cfg(priv)->fw_name_pre; char tag[8]; if (first) { @@ -580,14 +525,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) strcpy(tag, UCODE_EXPERIMENTAL_TAG); } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) { #endif - priv->fw_index = priv->cfg->ucode_api_max; + priv->fw_index = cfg(priv)->ucode_api_max; sprintf(tag, "%d", priv->fw_index); } else { priv->fw_index--; sprintf(tag, "%d", priv->fw_index); } - if (priv->fw_index < priv->cfg->ucode_api_min) { + if (priv->fw_index < cfg(priv)->ucode_api_min) { IWL_ERR(priv, "no suitable firmware found!\n"); return -ENOENT; } @@ -892,9 +837,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) struct iwl_ucode_header *ucode; int err; struct iwlagn_firmware_pieces pieces; - const unsigned int api_max = priv->cfg->ucode_api_max; - unsigned int api_ok = priv->cfg->ucode_api_ok; - const unsigned int api_min = priv->cfg->ucode_api_min; + const unsigned int api_max = cfg(priv)->ucode_api_max; + unsigned int api_ok = cfg(priv)->ucode_api_ok; + const unsigned int api_min = cfg(priv)->ucode_api_min; u32 api_ver; char buildstr[25]; u32 build; @@ -1040,30 +985,32 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ - if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code, + if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code, pieces.inst, pieces.inst_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data, + if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data, pieces.data, pieces.data_size)) goto err_pci_alloc; /* Initialization instructions and data */ if (pieces.init_size && pieces.init_data_size) { - if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code, + if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code, pieces.init, pieces.init_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data, + if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data, pieces.init_data, pieces.init_data_size)) goto err_pci_alloc; } /* WoWLAN instructions and data */ if (pieces.wowlan_inst_size && pieces.wowlan_data_size) { - if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code, + if (iwl_alloc_fw_desc(bus(priv), + &trans(priv)->ucode_wowlan.code, pieces.wowlan_inst, pieces.wowlan_inst_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data, + if (iwl_alloc_fw_desc(bus(priv), + &trans(priv)->ucode_wowlan.data, pieces.wowlan_data, pieces.wowlan_data_size)) goto err_pci_alloc; @@ -1081,20 +1028,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) 
priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; else priv->init_evtlog_size = - priv->cfg->base_params->max_event_log_size; + cfg(priv)->base_params->max_event_log_size; priv->init_errlog_ptr = pieces.init_errlog_ptr; priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr; if (pieces.inst_evtlog_size) priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; else priv->inst_evtlog_size = - priv->cfg->base_params->max_event_log_size; + cfg(priv)->base_params->max_event_log_size; priv->inst_errlog_ptr = pieces.inst_errlog_ptr; +#ifndef CONFIG_IWLWIFI_P2P + ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN; +#endif priv->new_scan_threshold_behaviour = !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); - if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) + if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN; /* @@ -1111,7 +1061,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) priv->sta_key_max_num = STA_KEY_MAX_NUM; priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; } - /* * figure out the offset of chain noise reset and gain commands * base on the size of standard phy calibration commands table size @@ -1156,7 +1105,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) err_pci_alloc: IWL_ERR(priv, "failed to allocate pci memory\n"); - iwl_dealloc_ucode(priv); + iwl_dealloc_ucode(trans(priv)); out_unbind: complete(&priv->firmware_loading_complete); device_release_driver(bus(priv)->dev); @@ -1176,7 +1125,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) spin_unlock_irqrestore(&priv->shrd->lock, flags); priv->thermal_throttle.ct_kill_toggle = false; - if (priv->cfg->base_params->support_ct_kill_exit) { + if (cfg(priv)->base_params->support_ct_kill_exit) { adv_cmd.critical_temperature_enter = cpu_to_le32(hw_params(priv).ct_kill_threshold); adv_cmd.critical_temperature_exit = @@ -1271,10 +1220,10 @@ int iwl_alive_start(struct iwl_priv *priv) return -ERFKILL; /* download priority table before any calibration request */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) { /* Configure Bluetooth device coexistence support */ - if (priv->cfg->bt_params->bt_sco_disable) + if (cfg(priv)->bt_params->bt_sco_disable) priv->bt_enable_pspoll = false; else priv->bt_enable_pspoll = true; @@ -1286,14 +1235,14 @@ int iwl_alive_start(struct iwl_priv *priv) priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; priv->cur_rssi_ctx = NULL; - iwlagn_send_prio_tbl(priv); + iwl_send_prio_tbl(trans(priv)); /* FIXME: w/a to force change uCode BT state machine */ - ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, + ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; - ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE, + ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; @@ -1304,16 +1253,17 @@ int iwl_alive_start(struct iwl_priv *priv) iwl_send_bt_config(priv); } - if (hw_params(priv).calib_rt_cfg) - iwlagn_send_calib_cfg_rt(priv, - hw_params(priv).calib_rt_cfg); + /* + * Perform runtime calibrations, including DC calibration. 
+ */ + iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX); ieee80211_wake_queues(priv->hw); priv->active_rate = IWL_RATES_MASK; /* Configure Tx antenna selection based on H/W config */ - iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant); + iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant); if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) { struct iwl_rxon_cmd *active_rxon = @@ -1352,7 +1302,7 @@ int iwl_alive_start(struct iwl_priv *priv) static void iwl_cancel_deferred_work(struct iwl_priv *priv); -static void __iwl_down(struct iwl_priv *priv) +void __iwl_down(struct iwl_priv *priv) { int exit_pending; @@ -1382,9 +1332,9 @@ static void __iwl_down(struct iwl_priv *priv) priv->bt_status = 0; priv->cur_rssi_ctx = NULL; priv->bt_is_sco = 0; - if (priv->cfg->bt_params) + if (cfg(priv)->bt_params) priv->bt_traffic_load = - priv->cfg->bt_params->bt_init_traffic_load; + cfg(priv)->bt_params->bt_init_traffic_load; else priv->bt_traffic_load = 0; priv->bt_full_concurrent = false; @@ -1415,7 +1365,7 @@ static void __iwl_down(struct iwl_priv *priv) priv->beacon_skb = NULL; } -static void iwl_down(struct iwl_priv *priv) +void iwl_down(struct iwl_priv *priv) { mutex_lock(&priv->shrd->mutex); __iwl_down(priv); @@ -1424,57 +1374,6 @@ static void iwl_down(struct iwl_priv *priv) iwl_cancel_deferred_work(priv); } -#define MAX_HW_RESTARTS 5 - -static int __iwl_up(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - int ret; - - lockdep_assert_held(&priv->shrd->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { - IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - for_each_context(priv, ctx) { - ret = iwlagn_alloc_bcast_station(priv, ctx); - if (ret) { - iwl_dealloc_bcast_stations(priv); - return ret; - } - } - - ret = iwlagn_run_init_ucode(priv); - if (ret) { - IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); - goto error; - } - - ret = iwlagn_load_ucode_wait_alive(priv, - &priv->ucode_rt, - IWL_UCODE_REGULAR); - if (ret) { - IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); - goto error; - } - - ret = iwl_alive_start(priv); - if (ret) - goto error; - return 0; - - error: - set_bit(STATUS_EXIT_PENDING, &priv->shrd->status); - __iwl_down(priv); - clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status); - - IWL_ERR(priv, "Unable to initialize device.\n"); - return ret; -} - - /***************************************************************************** * * Workqueue callbacks @@ -1502,7 +1401,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work) mutex_unlock(&priv->shrd->mutex); } -static void iwlagn_prepare_restart(struct iwl_priv *priv) +void iwlagn_prepare_restart(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; bool bt_full_concurrent; @@ -1559,1172 +1458,8 @@ static void iwl_bg_restart(struct work_struct *data) } } -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - -static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = 
BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_AP), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_CLIENT), - }, -}; - -static const struct ieee80211_iface_combination -iwlagn_iface_combinations_dualmode[] = { - { .num_different_channels = 1, - .max_interfaces = 2, - .beacon_int_infra_match = true, - .limits = iwlagn_sta_ap_limits, - .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), - }, - { .num_different_channels = 1, - .max_interfaces = 2, - .limits = iwlagn_2sta_limits, - .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), - }, -}; - -static const struct ieee80211_iface_combination -iwlagn_iface_combinations_p2p[] = { - { .num_different_channels = 1, - .max_interfaces = 2, - .beacon_int_infra_match = true, - .limits = iwlagn_p2p_sta_go_limits, - .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), - }, - { .num_different_channels = 1, - .max_interfaces = 2, - .limits = iwlagn_p2p_2sta_limits, - .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), - }, -}; - -/* - * Not a mac80211 entry point function, but it fits in with all the - * other mac80211 functions grouped here. - */ -static int iwlagn_mac_setup_register(struct iwl_priv *priv, - struct iwlagn_ucode_capabilities *capa) -{ - int ret; - struct ieee80211_hw *hw = priv->hw; - struct iwl_rxon_context *ctx; - - hw->rate_control_algorithm = "iwl-agn-rs"; - - /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_NEED_DTIM_PERIOD | - IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; - - /* - * Including the following line will crash some AP's. This - * workaround removes the stimulus which causes the crash until - * the AP software can be fixed. 
- hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - */ - - hw->flags |= IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - - if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) - hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; - - if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) - hw->flags |= IEEE80211_HW_MFP_CAPABLE; - - hw->sta_data_size = sizeof(struct iwl_station_priv); - hw->vif_data_size = sizeof(struct iwl_vif_priv); - - for_each_context(priv, ctx) { - hw->wiphy->interface_modes |= ctx->interface_modes; - hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; - } - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { - hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwlagn_iface_combinations_p2p); - } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { - hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwlagn_iface_combinations_dualmode); - } - - hw->wiphy->max_remain_on_channel_duration = 1000; - - hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | - WIPHY_FLAG_DISABLE_BEACON_HINTS | - WIPHY_FLAG_IBSS_RSN; - - if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; - if (!iwlagn_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE; - - hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = - IWLAGN_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = - IWLAGN_WOWLAN_MAX_PATTERN_LEN; - } - - if (iwlagn_mod_params.power_save) - hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; - else - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; - /* we create the 802.11 header and a zero-length SSID element */ - hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; - - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; - - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - - if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->bands[IEEE80211_BAND_2GHZ]; - if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->bands[IEEE80211_BAND_5GHZ]; - - iwl_leds_init(priv); - - ret = ieee80211_register_hw(priv->hw); - if (ret) { - IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); - return ret; - } - priv->mac80211_registered = 1; - - return 0; -} - - -static int iwlagn_mac_start(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&priv->shrd->mutex); - ret = __iwl_up(priv); - mutex_unlock(&priv->shrd->mutex); - if (ret) - return ret; - - IWL_DEBUG_INFO(priv, "Start UP work done.\n"); - - /* Now we should be done, and the READY bit should be set. 
*/ - if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status))) - ret = -EIO; - - iwlagn_led_enable(priv); - - priv->is_open = 1; - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -static void iwlagn_mac_stop(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!priv->is_open) - return; - - priv->is_open = 0; - - iwl_down(priv); - - flush_workqueue(priv->shrd->workqueue); - - /* User space software may expect getting rfkill changes - * even if interface is down */ - iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF); - iwl_enable_rfkill_int(priv); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -#ifdef CONFIG_PM_SLEEP -static int iwlagn_send_patterns(struct iwl_priv *priv, - struct cfg80211_wowlan *wowlan) -{ - struct iwlagn_wowlan_patterns_cmd *pattern_cmd; - struct iwl_host_cmd cmd = { - .id = REPLY_WOWLAN_PATTERNS, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .flags = CMD_SYNC, - }; - int i, err; - - if (!wowlan->n_patterns) - return 0; - - cmd.len[0] = sizeof(*pattern_cmd) + - wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); - - pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); - if (!pattern_cmd) - return -ENOMEM; - pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); - for (i = 0; i < wowlan->n_patterns; i++) { - int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); - - memcpy(&pattern_cmd->patterns[i].mask, - wowlan->patterns[i].mask, mask_len); - memcpy(&pattern_cmd->patterns[i].pattern, - wowlan->patterns[i].pattern, - wowlan->patterns[i].pattern_len); - pattern_cmd->patterns[i].mask_size = mask_len; - pattern_cmd->patterns[i].pattern_size = - wowlan->patterns[i].pattern_len; - } - - cmd.data[0] = pattern_cmd; - err = iwl_trans_send_cmd(trans(priv), &cmd); - kfree(pattern_cmd); - return err; -} -#endif - -static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct iwl_priv *priv = hw->priv; - - if (iwlagn_mod_params.sw_crypto) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) - goto out; - - memcpy(priv->kek, data->kek, NL80211_KEK_LEN); - memcpy(priv->kck, data->kck, NL80211_KCK_LEN); - priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); - priv->have_rekey_data = true; - - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -struct wowlan_key_data { - struct iwl_rxon_context *ctx; - struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; - struct iwlagn_wowlan_tkip_params_cmd *tkip; - const u8 *bssid; - bool error, use_rsc_tsc, use_tkip; -}; - -#ifdef CONFIG_PM_SLEEP -static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) -{ - int i; - - for (i = 0; i < IWLAGN_P1K_SIZE; i++) - out[i] = cpu_to_le16(p1k[i]); -} - -static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) -{ - struct iwl_priv *priv = hw->priv; - struct wowlan_key_data *data = _data; - struct iwl_rxon_context *ctx = data->ctx; - struct aes_sc *aes_sc, *aes_tx_sc = NULL; - struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; - struct iwlagn_p1k_cache *rx_p1ks; - u8 *rx_mic_key; - struct ieee80211_key_seq seq; - u32 cur_rx_iv32 = 0; - u16 p1k[IWLAGN_P1K_SIZE]; - int ret, i; - - mutex_lock(&priv->shrd->mutex); - - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && - !sta && 
!ctx->key_mapping_keys) - ret = iwl_set_default_wep_key(priv, ctx, key); - else - ret = iwl_set_dynamic_key(priv, ctx, key, sta); - - if (ret) { - IWL_ERR(priv, "Error setting key during suspend!\n"); - data->error = true; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - if (sta) { - tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; - tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; - - rx_p1ks = data->tkip->rx_uni; - - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); - - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); - iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); - - memcpy(data->tkip->mic_keys.tx, - &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], - IWLAGN_MIC_KEY_SIZE); - - rx_mic_key = data->tkip->mic_keys.rx_unicast; - } else { - tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; - rx_p1ks = data->tkip->rx_multi; - rx_mic_key = data->tkip->mic_keys.rx_mcast; - } - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 (as they need to to avoid replay attacks) - * for checking the IV in the frames. - */ - for (i = 0; i < IWLAGN_NUM_RSC; i++) { - ieee80211_get_key_rx_seq(key, i, &seq); - tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); - /* wrapping isn't allowed, AP must rekey */ - if (seq.tkip.iv32 > cur_rx_iv32) - cur_rx_iv32 = seq.tkip.iv32; - } - - ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); - iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); - ieee80211_get_tkip_rx_p1k(key, data->bssid, - cur_rx_iv32 + 1, p1k); - iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); - - memcpy(rx_mic_key, - &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], - IWLAGN_MIC_KEY_SIZE); - - data->use_tkip = true; - data->use_rsc_tsc = true; - break; - case WLAN_CIPHER_SUITE_CCMP: - if (sta) { - u8 *pn = seq.ccmp.pn; - - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; - aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } else - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 for checking the IV in the frames. - */ - for (i = 0; i < IWLAGN_NUM_RSC; i++) { - u8 *pn = seq.ccmp.pn; - - ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } - data->use_rsc_tsc = true; - break; - } - - mutex_unlock(&priv->shrd->mutex); -} - -static int iwlagn_mac_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wowlan) -{ - struct iwl_priv *priv = hw->priv; - struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; - struct iwl_rxon_cmd rxon; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; - struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; - struct wowlan_key_data key_data = { - .ctx = ctx, - .bssid = ctx->active.bssid_addr, - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - }; - int ret, i; - u16 seq; - - if (WARN_ON(!wowlan)) - return -EINVAL; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - /* Don't attempt WoWLAN when not associated, tear down instead. 
*/ - if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || - !iwl_is_associated_ctx(ctx)) { - ret = 1; - goto out; - } - - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) { - ret = -ENOMEM; - goto out; - } - - memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); - - /* - * We know the last used seqno, and the uCode expects to know that - * one, it will increment before TX. - */ - seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; - wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); - - /* - * For QoS counters, we store the one to use next, so subtract 0x10 - * since the uCode will add 0x10 before using the value. - */ - for (i = 0; i < 8; i++) { - seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number; - seq -= 0x10; - wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); - } - - if (wowlan->disconnect) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | - IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); - if (wowlan->magic_pkt) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); - if (wowlan->gtk_rekey_failure) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); - if (wowlan->eap_identity_req) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); - if (wowlan->four_way_handshake) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); - if (wowlan->rfkill_release) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL); - if (wowlan->n_patterns) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); - - iwl_scan_cancel_timeout(priv, 200); - - memcpy(&rxon, &ctx->active, sizeof(rxon)); - - iwl_trans_stop_device(trans(priv)); - - priv->shrd->wowlan = true; - - ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan, - IWL_UCODE_WOWLAN); - if (ret) - goto error; - - /* now configure WoWLAN ucode */ - ret = iwl_alive_start(priv); - if (ret) - goto error; - - memcpy(&ctx->staging, &rxon, sizeof(rxon)); - ret = iwlagn_commit_rxon(priv, ctx); - if (ret) - goto error; - - ret = iwl_power_update_mode(priv, true); - if (ret) - goto error; - - if (!iwlagn_mod_params.sw_crypto) { - /* mark all keys clear */ - priv->ucode_key_table = 0; - ctx->key_mapping_keys = 0; - - /* - * This needs to be unlocked due to lock ordering - * constraints. Since we're in the suspend path - * that isn't really a problem though. 
- */ - mutex_unlock(&priv->shrd->mutex); - ieee80211_iter_keys(priv->hw, ctx->vif, - iwlagn_wowlan_program_keys, - &key_data); - mutex_lock(&priv->shrd->mutex); - if (key_data.error) { - ret = -EIO; - goto error; - } - - if (key_data.use_rsc_tsc) { - struct iwl_host_cmd rsc_tsc_cmd = { - .id = REPLY_WOWLAN_TSC_RSC_PARAMS, - .flags = CMD_SYNC, - .data[0] = key_data.rsc_tsc, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .len[0] = sizeof(*key_data.rsc_tsc), - }; - - ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd); - if (ret) - goto error; - } - - if (key_data.use_tkip) { - ret = iwl_trans_send_cmd_pdu(trans(priv), - REPLY_WOWLAN_TKIP_PARAMS, - CMD_SYNC, sizeof(tkip_cmd), - &tkip_cmd); - if (ret) - goto error; - } - - if (priv->have_rekey_data) { - memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); - memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); - kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); - memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); - kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); - kek_kck_cmd.replay_ctr = priv->replay_ctr; - - ret = iwl_trans_send_cmd_pdu(trans(priv), - REPLY_WOWLAN_KEK_KCK_MATERIAL, - CMD_SYNC, sizeof(kek_kck_cmd), - &kek_kck_cmd); - if (ret) - goto error; - } - } - - ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER, - CMD_SYNC, sizeof(wakeup_filter_cmd), - &wakeup_filter_cmd); - if (ret) - goto error; - - ret = iwlagn_send_patterns(priv, wowlan); - if (ret) - goto error; - - device_set_wakeup_enable(bus(priv)->dev, true); - - /* Now let the ucode operate on its own */ - iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - - goto out; - - error: - priv->shrd->wowlan = false; - iwlagn_prepare_restart(priv); - ieee80211_restart_hw(priv->hw); - out: - mutex_unlock(&priv->shrd->mutex); - kfree(key_data.rsc_tsc); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static int iwlagn_mac_resume(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_vif *vif; - unsigned long flags; - u32 base, status = 0xffffffff; - int ret = -EIO; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - - base = priv->device_pointers.error_event_table; - if (iwlagn_hw_valid_rtc_data_addr(base)) { - spin_lock_irqsave(&bus(priv)->reg_lock, flags); - ret = iwl_grab_nic_access_silent(bus(priv)); - if (ret == 0) { - iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base); - status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); - iwl_release_nic_access(bus(priv)); - } - spin_unlock_irqrestore(&bus(priv)->reg_lock, flags); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (ret == 0) { - if (!priv->wowlan_sram) - priv->wowlan_sram = - kzalloc(priv->ucode_wowlan.data.len, - GFP_KERNEL); - - if (priv->wowlan_sram) - _iwl_read_targ_mem_words( - bus(priv), 0x800000, priv->wowlan_sram, - priv->ucode_wowlan.data.len / 4); - } -#endif - } - - /* we'll clear ctx->vif during iwlagn_prepare_restart() */ - vif = ctx->vif; - - priv->shrd->wowlan = false; - - device_set_wakeup_enable(bus(priv)->dev, false); - - iwlagn_prepare_restart(priv); - - memset((void *)&ctx->active, 0, sizeof(ctx->active)); - iwl_connection_init_rx_config(priv, ctx); - iwlagn_set_rxon_chain(priv, ctx); - - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - ieee80211_resume_disconnect(vif); - - return 1; -} -#endif - -static void 
iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MACDUMP(priv, "enter\n"); - - IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - - if (iwlagn_tx_skb(priv, skb)) - dev_kfree_skb_any(skb); - - IWL_DEBUG_MACDUMP(priv, "leave\n"); -} - -static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) -{ - struct iwl_priv *priv = hw->priv; - - iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); -} - -static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - int ret; - bool is_default_wep_key = false; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (iwlagn_mod_params.sw_crypto) { - IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - /* fall through */ - case WLAN_CIPHER_SUITE_CCMP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - break; - default: - break; - } - - /* - * We could program these keys into the hardware as well, but we - * don't expect much multicast traffic in IBSS and having keys - * for more stations is probably more useful. - * - * Mark key TX-only and return 0. - */ - if (vif->type == NL80211_IFTYPE_ADHOC && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { - key->hw_key_idx = WEP_INVALID_OFFSET; - return 0; - } - - /* If they key was TX-only, accept deletion */ - if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) - return 0; - - mutex_lock(&priv->shrd->mutex); - iwl_scan_cancel_timeout(priv, 100); - - BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); - - /* - * If we are getting WEP group key and we didn't receive any key mapping - * so far, we are in legacy wep mode (group key only), otherwise we are - * in 1X mode. - * In legacy wep mode, we use another host command to the uCode. 
- */ - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { - if (cmd == SET_KEY) - is_default_wep_key = !ctx->key_mapping_keys; - else - is_default_wep_key = - key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; - } - - - switch (cmd) { - case SET_KEY: - if (is_default_wep_key) { - ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); - break; - } - ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); - if (ret) { - /* - * can't add key for RX, but we don't need it - * in the device for TX so still return 0 - */ - ret = 0; - key->hw_key_idx = WEP_INVALID_OFFSET; - } - - IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (is_default_wep_key) - ret = iwl_remove_default_wep_key(priv, ctx, key); - else - ret = iwl_remove_dynamic_key(priv, ctx, key, sta); - - IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) -{ - struct iwl_priv *priv = hw->priv; - int ret = -EINVAL; - struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", - sta->addr, tid); - - if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)) - return -EACCES; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - IWL_DEBUG_HT(priv, "start Rx\n"); - ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); - break; - case IEEE80211_AMPDU_RX_STOP: - IWL_DEBUG_HT(priv, "stop Rx\n"); - ret = iwl_sta_rx_agg_stop(priv, sta, tid); - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) - ret = 0; - break; - case IEEE80211_AMPDU_TX_START: - IWL_DEBUG_HT(priv, "start Tx\n"); - ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); - break; - case IEEE80211_AMPDU_TX_STOP: - IWL_DEBUG_HT(priv, "stop Tx\n"); - ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); - if ((ret == 0) && (priv->agg_tids_count > 0)) { - priv->agg_tids_count--; - IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", - priv->agg_tids_count); - } - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) - ret = 0; - if (!priv->agg_tids_count && priv->cfg->ht_params && - priv->cfg->ht_params->use_rts_for_aggregation) { - /* - * switch off RTS/CTS if it was previously enabled - */ - sta_priv->lq_sta.lq.general_params.flags &= - ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; - iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), - &sta_priv->lq_sta.lq, CMD_ASYNC, false); - } - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); - - iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta), - tid, buf_size); - - /* - * If the limit is 0, then it wasn't initialised yet, - * use the default. We can do that since we take the - * minimum below, and we don't want to go above our - * default due to hardware restrictions. - */ - if (sta_priv->max_agg_bufsize == 0) - sta_priv->max_agg_bufsize = - LINK_QUAL_AGG_FRAME_LIMIT_DEF; - - /* - * Even though in theory the peer could have different - * aggregation reorder buffer sizes for different sessions, - * our ucode doesn't allow for that and has a global limit - * for each station. 
Therefore, use the minimum of all the - * aggregation sessions and our default value. - */ - sta_priv->max_agg_bufsize = - min(sta_priv->max_agg_bufsize, buf_size); - - if (priv->cfg->ht_params && - priv->cfg->ht_params->use_rts_for_aggregation) { - /* - * switch to RTS/CTS if it is the prefer protection - * method for HT traffic - */ - - sta_priv->lq_sta.lq.general_params.flags |= - LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; - } - priv->agg_tids_count++; - IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", - priv->agg_tids_count); - - sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = - sta_priv->max_agg_bufsize; - - iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), - &sta_priv->lq_sta.lq, CMD_ASYNC, false); - - IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", - sta->addr, tid); - ret = 0; - break; - } - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - return ret; -} - -static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - int ret = 0; - u8 sta_id; - - IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n", - sta->addr); - mutex_lock(&priv->shrd->mutex); - IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", - sta->addr); - sta_priv->sta_id = IWL_INVALID_STATION; - - atomic_set(&sta_priv->pending_frames, 0); - if (vif->type == NL80211_IFTYPE_AP) - sta_priv->client = true; - - ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, - is_ap, sta, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM (%d)\n", - sta->addr, ret); - /* Should we return success if return code is EEXIST ? */ - goto out; - } - - sta_priv->sta_id = sta_id; - - /* Initialize rate scaling */ - IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", - sta->addr); - iwl_rs_rate_init(priv, sta, sta_id); - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch) -{ - struct iwl_priv *priv = hw->priv; - const struct iwl_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->channel; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - /* - * MULTI-FIXME - * When we add support for multiple interfaces, we need to - * revisit this. The channel switch command in the device - * only affects the BSS context, but what does that really - * mean? And what if we get a CSA on the second interface? - * This needs a lot of work. 
- */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u16 ch; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->shrd->mutex); - - if (iwl_is_rfkill(priv->shrd)) - goto out; - - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) || - test_bit(STATUS_SCANNING, &priv->shrd->status) || - test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status)) - goto out; - - if (!iwl_is_associated_ctx(ctx)) - goto out; - - if (!priv->cfg->lib->set_channel_switch) - goto out; - - ch = channel->hw_value; - if (le16_to_cpu(ctx->active.channel) == ch) - goto out; - - ch_info = iwl_get_channel_info(priv, channel->band, ch); - if (!is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "invalid channel\n"); - goto out; - } - - spin_lock_irq(&priv->shrd->lock); - - priv->current_ht_config.smps = conf->smps_mode; - - /* Configure HT40 channels */ - ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) - iwlagn_config_ht40(conf, ctx); - else - ctx->ht.is_40mhz = false; - - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_set_rxon_channel(priv, channel, ctx); - iwl_set_rxon_ht(priv, ht_conf); - iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); - - spin_unlock_irq(&priv->shrd->lock); - - iwl_set_rate(priv); - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); - priv->switch_channel = cpu_to_le16(ch); - if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) { - clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); - priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); - } - -out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwlagn_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - struct iwl_rxon_context *ctx; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&priv->shrd->mutex); - - for_each_context(priv, ctx) { - ctx->staging.filter_flags &= ~filter_nand; - ctx->staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but we'll eventually commit the filter flags change anyway. - */ - } - - mutex_unlock(&priv->shrd->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. 
- */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - -static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) -{ - struct iwl_priv *priv = hw->priv; - - mutex_lock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { - IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); - goto done; - } - if (iwl_is_rfkill(priv->shrd)) { - IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); - goto done; - } - - /* - * mac80211 will not push any more frames for transmit - * until the flush is completed - */ - if (drop) { - IWL_DEBUG_MAC80211(priv, "send flush command\n"); - if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { - IWL_ERR(priv, "flush request fail\n"); - goto done; - } - } - IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(trans(priv)); -done: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} void iwlagn_disable_roc(struct iwl_priv *priv) { @@ -2758,166 +1493,6 @@ static void iwlagn_disable_roc_work(struct work_struct *work) mutex_unlock(&priv->shrd->mutex); } -static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type, - int duration) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; - int err = 0; - - if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) - return -EOPNOTSUPP; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) { - err = -EBUSY; - goto out; - } - - priv->hw_roc_channel = channel; - priv->hw_roc_chantype = channel_type; - priv->hw_roc_duration = duration; - priv->hw_roc_start_notified = false; - cancel_delayed_work(&priv->hw_roc_disable_work); - - if (!ctx->is_active) { - ctx->is_active = true; - ctx->staging.dev_type = RXON_DEV_TYPE_P2P; - memcpy(ctx->staging.node_addr, - priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, - ETH_ALEN); - memcpy(ctx->staging.bssid_addr, - priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, - ETH_ALEN); - err = iwlagn_commit_rxon(priv, ctx); - if (err) - goto out; - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK | - RXON_FILTER_PROMISC_MSK | - RXON_FILTER_CTL2HOST_MSK; - - err = iwlagn_commit_rxon(priv, ctx); - if (err) { - iwlagn_disable_roc(priv); - goto out; - } - priv->hw_roc_setup = true; - } - - err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band); - if (err) - iwlagn_disable_roc(priv); - - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return err; -} - -static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); - iwlagn_disable_roc(priv); - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return 0; -} - -static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *bssid, - enum ieee80211_tx_sync_type type) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context 
*ctx = vif_priv->ctx; - int ret; - u8 sta_id; - - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return 0; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (iwl_is_associated_ctx(ctx)) { - ret = 0; - goto out; - } - - if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) { - ret = -EBUSY; - goto out; - } - - ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id); - if (ret) - goto out; - - if (WARN_ON(sta_id != ctx->ap_sta_id)) { - ret = -EIO; - goto out_remove_sta; - } - - memcpy(ctx->bssid, bssid, ETH_ALEN); - ctx->preauth_bssid = true; - - ret = iwlagn_commit_rxon(priv, ctx); - - if (ret == 0) - goto out; - - out_remove_sta: - iwl_remove_station(priv, sta_id, bssid); - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *bssid, - enum ieee80211_tx_sync_type type) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (iwl_is_associated_ctx(ctx)) - goto out; - - iwl_remove_station(priv, ctx->ap_sta_id, bssid); - ctx->preauth_bssid = false; - /* no need to commit */ - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - /***************************************************************************** * * driver setup and teardown @@ -2941,8 +1516,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) iwl_setup_scan_deferred_work(priv); - if (priv->cfg->lib->bt_setup_deferred_work) - priv->cfg->lib->bt_setup_deferred_work(priv); + if (cfg(priv)->lib->bt_setup_deferred_work) + cfg(priv)->lib->bt_setup_deferred_work(priv); init_timer(&priv->statistics_periodic); priv->statistics_periodic.data = (unsigned long)priv; @@ -2959,8 +1534,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) static void iwl_cancel_deferred_work(struct iwl_priv *priv) { - if (priv->cfg->lib->cancel_deferred_work) - priv->cfg->lib->cancel_deferred_work(priv); + if (cfg(priv)->lib->cancel_deferred_work) + cfg(priv)->lib->cancel_deferred_work(priv); cancel_work_sync(&priv->run_time_calib_work); cancel_work_sync(&priv->beacon_update); @@ -3004,6 +1579,8 @@ static int iwl_init_drv(struct iwl_priv *priv) mutex_init(&priv->shrd->mutex); + INIT_LIST_HEAD(&trans(priv)->calib_results); + priv->ieee_channels = NULL; priv->ieee_rates = NULL; priv->band = IEEE80211_BAND_2GHZ; @@ -3027,8 +1604,8 @@ static int iwl_init_drv(struct iwl_priv *priv) iwl_init_scan_params(priv); /* init bt coex */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) { priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; @@ -3060,88 +1637,19 @@ err: static void iwl_uninit_drv(struct iwl_priv *priv) { - iwl_calib_free_results(priv); iwl_free_geos(priv); iwl_free_channel_map(priv); if (priv->tx_cmd_pool) kmem_cache_destroy(priv->tx_cmd_pool); kfree(priv->scan_cmd); kfree(priv->beacon_cmd); + kfree(rcu_dereference_raw(priv->noa_data)); #ifdef CONFIG_IWLWIFI_DEBUGFS kfree(priv->wowlan_sram); #endif } -static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, - enum ieee80211_rssi_event 
rssi_event) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { - if (rssi_event == RSSI_EVENT_LOW) - priv->bt_enable_pspoll = true; - else if (rssi_event == RSSI_EVENT_HIGH) - priv->bt_enable_pspoll = false; - - iwlagn_send_advance_bt_config(priv); - } else { - IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," - "ignoring RSSI callback\n"); - } - - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, bool set) -{ - struct iwl_priv *priv = hw->priv; - queue_work(priv->shrd->workqueue, &priv->beacon_update); - - return 0; -} - -struct ieee80211_ops iwlagn_hw_ops = { - .tx = iwlagn_mac_tx, - .start = iwlagn_mac_start, - .stop = iwlagn_mac_stop, -#ifdef CONFIG_PM_SLEEP - .suspend = iwlagn_mac_suspend, - .resume = iwlagn_mac_resume, -#endif - .add_interface = iwlagn_mac_add_interface, - .remove_interface = iwlagn_mac_remove_interface, - .change_interface = iwlagn_mac_change_interface, - .config = iwlagn_mac_config, - .configure_filter = iwlagn_configure_filter, - .set_key = iwlagn_mac_set_key, - .update_tkip_key = iwlagn_mac_update_tkip_key, - .set_rekey_data = iwlagn_mac_set_rekey_data, - .conf_tx = iwlagn_mac_conf_tx, - .bss_info_changed = iwlagn_bss_info_changed, - .ampdu_action = iwlagn_mac_ampdu_action, - .hw_scan = iwlagn_mac_hw_scan, - .sta_notify = iwlagn_mac_sta_notify, - .sta_add = iwlagn_mac_sta_add, - .sta_remove = iwlagn_mac_sta_remove, - .channel_switch = iwlagn_mac_channel_switch, - .flush = iwlagn_mac_flush, - .tx_last_beacon = iwlagn_mac_tx_last_beacon, - .remain_on_channel = iwlagn_mac_remain_on_channel, - .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, - .rssi_callback = iwlagn_mac_rssi_callback, - CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd) - CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump) - .tx_sync = iwlagn_mac_tx_sync, - .finish_tx_sync = iwlagn_mac_finish_tx_sync, - .set_tim = iwlagn_mac_set_tim, -}; static u32 iwl_hw_detect(struct iwl_priv *priv) { @@ -3161,40 +1669,55 @@ static int iwl_set_hw_params(struct iwl_priv *priv) hw_params(priv).rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); - if (iwlagn_mod_params.disable_11n) - priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; + if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL) + cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; hw_params(priv).num_ampdu_queues = - priv->cfg->base_params->num_of_ampdu_queues; + cfg(priv)->base_params->num_of_ampdu_queues; hw_params(priv).shadow_reg_enable = - priv->cfg->base_params->shadow_reg_enable; - hw_params(priv).sku = priv->cfg->sku; - hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout; + cfg(priv)->base_params->shadow_reg_enable; + hw_params(priv).sku = cfg(priv)->sku; + hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout; /* Device-specific setup */ - return priv->cfg->lib->set_hw_params(priv); + return cfg(priv)->lib->set_hw_params(priv); } -/* This function both allocates and initializes hw and priv. 
*/ -static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg) -{ - struct iwl_priv *priv; - /* mac80211 allocates memory for this device instance, including - * space for this driver's private structure */ - struct ieee80211_hw *hw; - hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops); - if (hw == NULL) { - pr_err("%s: Can not allocate network device\n", - cfg->name); - goto out; - } - priv = hw->priv; - priv->hw = hw; +static void iwl_debug_config(struct iwl_priv *priv) +{ + dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG " +#ifdef CONFIG_IWLWIFI_DEBUG + "enabled\n"); +#else + "disabled\n"); +#endif + dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS " +#ifdef CONFIG_IWLWIFI_DEBUGFS + "enabled\n"); +#else + "disabled\n"); +#endif + dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING " +#ifdef CONFIG_IWLWIFI_DEVICE_TRACING + "enabled\n"); +#else + "disabled\n"); +#endif -out: - return hw; + dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE " +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE + "enabled\n"); +#else + "disabled\n"); +#endif + dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P " +#ifdef CONFIG_IWLWIFI_P2P + "enabled\n"); +#else + "disabled\n"); +#endif } int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, @@ -3209,8 +1732,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, /************************ * 1. Allocating HW data ************************/ - hw = iwl_alloc_all(cfg); + hw = iwl_alloc_all(); if (!hw) { + pr_err("%s: Cannot allocate network device\n", cfg->name); err = -ENOMEM; goto out; } @@ -3231,8 +1755,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, SET_IEEE80211_DEV(hw, bus(priv)->dev); + /* what debugging capabilities we have */ + iwl_debug_config(priv); + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); - priv->cfg = cfg; + cfg(priv) = cfg; /* is antenna coupling more than 35dB ? 
*/ priv->bt_ant_couple_ok = @@ -3266,7 +1793,7 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, ***********************/ hw_rev = iwl_hw_detect(priv); IWL_INFO(priv, "Detected %s, REV=0x%X\n", - priv->cfg->name, hw_rev); + cfg(priv)->name, hw_rev); err = iwl_trans_request_irq(trans(priv)); if (err) @@ -3296,11 +1823,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, goto out_free_eeprom; /* extract MAC Address */ - iwl_eeprom_get_mac(priv, priv->addresses[0].addr); + iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr); IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); priv->hw->wiphy->addresses = priv->addresses; priv->hw->wiphy->n_addresses = 1; - num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); + num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS); if (num_mac > 1) { memcpy(priv->addresses[1].addr, priv->addresses[0].addr, ETH_ALEN); @@ -3365,7 +1892,7 @@ out_destroy_workqueue: priv->shrd->workqueue = NULL; iwl_uninit_drv(priv); out_free_eeprom: - iwl_eeprom_free(priv); + iwl_eeprom_free(priv->shrd); out_free_trans: iwl_trans_free(trans(priv)); out_free_traffic_mem: @@ -3390,21 +1917,16 @@ void __devexit iwl_remove(struct iwl_priv * priv) set_bit(STATUS_EXIT_PENDING, &priv->shrd->status); iwl_testmode_cleanup(priv); - iwl_leds_exit(priv); - - if (priv->mac80211_registered) { - ieee80211_unregister_hw(priv->hw); - priv->mac80211_registered = 0; - } + iwlagn_mac_unregister(priv); iwl_tt_exit(priv); /*This will stop the queues, move the device to low power state */ iwl_trans_stop_device(trans(priv)); - iwl_dealloc_ucode(priv); + iwl_dealloc_ucode(trans(priv)); - iwl_eeprom_free(priv); + iwl_eeprom_free(priv->shrd); /*netif_stop_queue(dev); */ flush_workqueue(priv->shrd->workqueue); @@ -3474,8 +1996,9 @@ module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO); MODULE_PARM_DESC(queues_num, "number of hw queues."); -module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO); -MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); +module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO); +MODULE_PARM_DESC(11n_disable, + "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K, int, S_IRUGO); MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index 3856abaea50..f84fb3c5356 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -65,6 +65,12 @@ #include "iwl-dev.h" +struct iwlagn_ucode_capabilities { + u32 max_probe_length; + u32 standard_phy_calibration_size; + u32 flags; +}; + extern struct ieee80211_ops iwlagn_hw_ops; int iwl_reset_ict(struct iwl_trans *trans); @@ -77,6 +83,16 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) hdr->data_valid = 1; } +void __iwl_down(struct iwl_priv *priv); +void iwl_down(struct iwl_priv *priv); +void iwlagn_prepare_restart(struct iwl_priv *priv); + +/* MAC80211 */ +struct ieee80211_hw *iwl_alloc_all(void); +int iwlagn_mac_setup_register(struct iwl_priv *priv, + struct iwlagn_ucode_capabilities *capa); +void iwlagn_mac_unregister(struct iwl_priv *priv); + /* RXON */ int iwlagn_set_pan_params(struct 
iwl_priv *priv); int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); @@ -93,20 +109,20 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf, int iwlagn_rx_calib_result(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwl_device_cmd *cmd); -int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); -void iwlagn_send_prio_tbl(struct iwl_priv *priv); -int iwlagn_run_init_ucode(struct iwl_priv *priv); -int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, - struct fw_img *image, - enum iwlagn_ucode_type ucode_type); /* lib */ int iwlagn_send_tx_power(struct iwl_priv *priv); void iwlagn_temperature(struct iwl_priv *priv); -u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); +u16 iwl_eeprom_calib_version(struct iwl_shared *shrd); int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); int iwlagn_send_beacon_cmd(struct iwl_priv *priv); +#ifdef CONFIG_PM_SLEEP +int iwlagn_send_patterns(struct iwl_priv *priv, + struct cfg80211_wowlan *wowlan); +int iwlagn_suspend(struct iwl_priv *priv, + struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); +#endif /* rx */ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); @@ -117,6 +133,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv); int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size); int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, @@ -198,9 +216,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_sta *sta, u8 *sta_id_r); int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, const u8 *addr); -int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); - u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, const u8 *addr, bool is_ap, struct ieee80211_sta *sta); @@ -318,10 +333,6 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); int iwl_update_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx); int iwl_update_bcast_stations(struct iwl_priv *priv); -void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta); /* rate */ static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) @@ -341,28 +352,11 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) /* eeprom */ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv); -void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); - -/* notification wait support */ -void __acquires(wait_entry) -iwlagn_init_notification_wait(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - u8 cmd, - void (*fn)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt, - void *data), - void *fn_data); -int __must_check __releases(wait_entry) -iwlagn_wait_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - unsigned long timeout); -void __releases(wait_entry) -iwlagn_remove_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry); -extern int iwlagn_init_alive_start(struct 
iwl_priv *priv); +void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac); + extern int iwl_alive_start(struct iwl_priv *priv); /* svtool */ -#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h index 08b97594e30..940d5038b39 100644 --- a/drivers/net/wireless/iwlwifi/iwl-bus.h +++ b/drivers/net/wireless/iwlwifi/iwl-bus.h @@ -122,7 +122,8 @@ struct iwl_bus; * struct iwl_bus_ops - bus specific operations * @get_pm_support: must returns true if the bus can go to sleep * @apm_config: will be called during the config of the APM - * @get_hw_id: prints the hw_id in the provided buffer + * @get_hw_id_string: prints the hw_id in the provided buffer + * @get_hw_id: get hw_id in u32 * @write8: write a byte to register at offset ofs * @write32: write a dword to register at offset ofs * @wread32: read a dword at register at offset ofs @@ -130,7 +131,8 @@ struct iwl_bus; struct iwl_bus_ops { bool (*get_pm_support)(struct iwl_bus *bus); void (*apm_config)(struct iwl_bus *bus); - void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len); + void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len); + u32 (*get_hw_id)(struct iwl_bus *bus); void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val); void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val); u32 (*read32)(struct iwl_bus *bus, u32 ofs); @@ -172,9 +174,15 @@ static inline void bus_apm_config(struct iwl_bus *bus) bus->ops->apm_config(bus); } -static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len) +static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[], + int buf_len) { - bus->ops->get_hw_id(bus, buf, buf_len); + bus->ops->get_hw_id_string(bus, buf, buf_len); +} + +static inline u32 bus_get_hw_id(struct iwl_bus *bus) +{ + return bus->ops->get_hw_id(bus); } static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val) diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h index 2a2dc4597ba..e1d78257e4a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-cfg.h +++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h @@ -101,17 +101,11 @@ extern struct iwl_cfg iwl100_bg_cfg; extern struct iwl_cfg iwl130_bgn_cfg; extern struct iwl_cfg iwl130_bg_cfg; extern struct iwl_cfg iwl2000_2bgn_cfg; -extern struct iwl_cfg iwl2000_2bg_cfg; extern struct iwl_cfg iwl2000_2bgn_d_cfg; extern struct iwl_cfg iwl2030_2bgn_cfg; -extern struct iwl_cfg iwl2030_2bg_cfg; extern struct iwl_cfg iwl6035_2agn_cfg; -extern struct iwl_cfg iwl6035_2abg_cfg; -extern struct iwl_cfg iwl6035_2bg_cfg; -extern struct iwl_cfg iwl105_bg_cfg; extern struct iwl_cfg iwl105_bgn_cfg; extern struct iwl_cfg iwl105_bgn_d_cfg; -extern struct iwl_cfg iwl135_bg_cfg; extern struct iwl_cfg iwl135_bgn_cfg; #endif /* __iwl_pci_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 69d5f85d11e..265de39d394 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -109,10 +109,10 @@ enum { /* RX, TX, LEDs */ REPLY_TX = 0x1c, REPLY_LEDS_CMD = 0x48, - REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* WiMAX coexistence */ - COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ + COEX_PRIORITY_TABLE_CMD = 0x5a, 
COEX_MEDIUM_NOTIFICATION = 0x5b, COEX_EVENT_CMD = 0x5c, @@ -198,6 +198,7 @@ enum { REPLY_WOWLAN_TKIP_PARAMS = 0xe3, REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4, REPLY_WOWLAN_GET_STATUS = 0xe5, + REPLY_D3_CONFIG = 0xd3, REPLY_MAX = 0xff }; @@ -465,23 +466,27 @@ struct iwl_error_event_table { u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ -#if 0 - /* no need to read the remainder, we don't use the values */ - u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */ - u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */ - u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */ - u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */ - u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: + * rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: + * host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: + * enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: + * time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: + * wico interrupt */ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ - u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */ - u32 u_timestamp; /* indicate when the date and time of the compilation */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on + * (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the + * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ -#endif } __packed; struct iwl_alive_resp { @@ -809,7 +814,7 @@ struct iwl_qosparam_cmd { #define IWLAGN_STATION_COUNT 16 #define IWL_INVALID_STATION 255 -#define IWL_MAX_TID_COUNT 9 +#define IWL_MAX_TID_COUNT 8 #define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) #define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) @@ -930,8 +935,7 @@ struct iwl_addsta_cmd { * corresponding to bit (e.g. bit 5 controls TID 5). * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ __le16 tid_disable_tx; - - __le16 rate_n_flags; /* 3945 only */ + __le16 legacy_reserved; /* TID for which to add block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ @@ -1161,8 +1165,7 @@ struct iwl_rx_mpdu_res_start { * * uCode handles retrying Tx when an ACK is expected but not received. * This includes trying lower data rates than the one requested in the Tx - * command, as set up by the REPLY_RATE_SCALE (for 3945) or - * REPLY_TX_LINK_QUALITY_CMD (agn). + * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn). * * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. * This command must be executed after every RXON command, before Tx can occur. @@ -1174,25 +1177,9 @@ struct iwl_rx_mpdu_res_start { * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it * before this frame. if CTS-to-self required check * RXON_FLG_SELF_CTS_EN status. - * unused in 3945/4965, used in 5000 series and after */ #define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0) -/* - * 1: Use Request-To-Send protocol before this frame. - * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. - * used in 3945/4965, unused in 5000 series and after - */ -#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) - -/* - * 1: Transmit Clear-To-Send to self before this frame. 
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. - * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. - * used in 3945/4965, unused in 5000 series and after - */ -#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) - /* 1: Expect ACK from receiving station * 0: Don't expect ACK (MAC header's duration field s/b 0) * Set this for unicast frames, but not broadcast/multicast. */ @@ -1210,18 +1197,8 @@ struct iwl_rx_mpdu_res_start { * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ #define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) -/* - * 1: Frame requires full Tx-Op protection. - * Set this if either RTS or CTS Tx Flag gets set. - * used in 3945/4965, unused in 5000 series and after - */ -#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) - -/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices. - * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ +/* Tx antenna selection field; reserved (0) for agn devices. */ #define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) -#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8) -#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9) /* 1: Ignore Bluetooth priority for this frame. * 0: Delay Tx until Bluetooth device is done (normal usage). */ @@ -1567,7 +1544,6 @@ struct iwl_compressed_ba_resp { __le64 bitmap; __le16 scd_flow; __le16 scd_ssn; - /* following only for 5000 series and up */ u8 txed; /* number of frames sent */ u8 txed_2_done; /* number of frames acked */ } __packed; @@ -1669,7 +1645,7 @@ struct iwl_link_qual_agg_params { /* * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) * - * For agn devices only; 3945 uses REPLY_RATE_SCALE. + * For agn devices * * Each station in the agn device's internal station table has its own table * of 16 @@ -1918,7 +1894,7 @@ struct iwl_link_quality_cmd { /* * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) * - * 3945 and agn devices support hardware handshake with Bluetooth device on + * agn devices support hardware handshake with Bluetooth device on * same platform. Bluetooth device alerts wireless device when it will Tx; * wireless device can delay or kill its own Tx to accommodate. */ @@ -2202,8 +2178,8 @@ struct iwl_spectrum_notification { struct iwl_powertable_cmd { __le16 flags; - u8 keep_alive_seconds; /* 3945 reserved */ - u8 debug_flags; /* 3945 reserved */ + u8 keep_alive_seconds; + u8 debug_flags; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; @@ -2324,9 +2300,9 @@ struct iwl_scan_channel { /** * struct iwl_ssid_ie - directed scan network information element * - * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in - * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; - * each channel may select different ssids from among the 20 (4) entries. + * Up to 20 of these may appear in REPLY_SCAN_CMD, + * selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 entries. * SSID IEs get transmitted in reverse order of entry. 
*/ struct iwl_ssid_ie { @@ -2335,7 +2311,6 @@ struct iwl_ssid_ie { u8 ssid[32]; } __packed; -#define PROBE_OPTION_MAX_3945 4 #define PROBE_OPTION_MAX 20 #define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) #define IWL_GOOD_CRC_TH_DISABLED 0 @@ -2416,8 +2391,6 @@ struct iwl_scan_cmd { * channel */ __le32 suspend_time; /* pause scan this long (in "extended beacon * format") when returning to service chnl: - * 3945; 31:24 # beacons, 19:0 additional usec, - * 4965; 31:22 # beacons, 21:0 additional usec. */ __le32 flags; /* RXON_FLG_* */ __le32 filter_flags; /* RXON_FILTER_* */ @@ -2733,7 +2706,7 @@ struct statistics_div { struct statistics_general_common { __le32 temperature; /* radio temperature */ - __le32 temperature_m; /* for 5000 and up, this is radio voltage */ + __le32 temperature_m; /* radio voltage */ struct statistics_dbg dbg; __le32 sleep_time; __le32 slots_out; @@ -3801,6 +3774,19 @@ struct iwl_bt_coex_prot_env_cmd { } __attribute__((packed)); /* + * REPLY_D3_CONFIG + */ +enum iwlagn_d3_wakeup_filters { + IWLAGN_D3_WAKEUP_RFKILL = BIT(0), + IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1), +}; + +struct iwlagn_d3_config_cmd { + __le32 min_sleep_time; + __le32 wakeup_flags; +} __packed; + +/* * REPLY_WOWLAN_PATTERNS */ #define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16 @@ -3830,19 +3816,16 @@ enum iwlagn_wowlan_wakeup_filters { IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), - IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5), - IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6), - IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7), - IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8), - IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9), - IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10), + IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), + IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), + IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(7), + IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(8), }; struct iwlagn_wowlan_wakeup_filter_cmd { __le32 enabled; __le16 non_qos_seq; - u8 min_sleep_seconds; - u8 reserved; + __le16 reserved; __le16 qos_seq[8]; }; diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index fcf54160e4e..7bcfa781e0b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -60,8 +60,8 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv, ht_info->ht_supported = true; - if (priv->cfg->ht_params && - priv->cfg->ht_params->ht_greenfield_support) + if (cfg(priv)->ht_params && + cfg(priv)->ht_params->ht_greenfield_support) ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; ht_info->cap |= IEEE80211_HT_CAP_SGI_20; max_bit_rate = MAX_BIT_RATE_20_MHZ; @@ -76,11 +76,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv, ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; - if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor) - ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor; ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; - if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density) - ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density; ht_info->mcs.rx_mask[0] = 0xFF; if (rx_chains_num >= 2) @@ -141,7 +137,7 @@ int iwl_init_geos(struct iwl_priv *priv) sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; - if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) + if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) iwl_init_ht_hw_capab(priv, &sband->ht_cap, 
IEEE80211_BAND_5GHZ); @@ -151,7 +147,7 @@ int iwl_init_geos(struct iwl_priv *priv) sband->bitrates = rates; sband->n_bitrates = IWL_RATE_COUNT_LEGACY; - if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) + if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) iwl_init_ht_hw_capab(priv, &sband->ht_cap, IEEE80211_BAND_2GHZ); @@ -206,12 +202,12 @@ int iwl_init_geos(struct iwl_priv *priv) priv->tx_power_next = max_tx_power; if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && - priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) { + cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) { char buf[32]; - bus_get_hw_id(bus(priv), buf, sizeof(buf)); + bus_get_hw_id_string(bus(priv), buf, sizeof(buf)); IWL_INFO(priv, "Incorrectly detected BG card as ABG. " "Please send your %s to maintainer.\n", buf); - priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; + cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; } IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", @@ -836,19 +832,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv, } #endif -static void iwlagn_abort_notification_waits(struct iwl_priv *priv) -{ - unsigned long flags; - struct iwl_notification_wait *wait_entry; - - spin_lock_irqsave(&priv->notif_wait_lock, flags); - list_for_each_entry(wait_entry, &priv->notif_waits, list) - wait_entry->aborted = true; - spin_unlock_irqrestore(&priv->notif_wait_lock, flags); - - wake_up_all(&priv->notif_waitq); -} - void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) { unsigned int reload_msec; @@ -860,7 +843,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) /* Cancel currently queued command. */ clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status); - iwlagn_abort_notification_waits(priv); + iwl_abort_notification_waits(priv->shrd); /* Keep the restart process from trying to send host * commands by clearing the ready bit */ @@ -979,9 +962,9 @@ int iwl_apm_init(struct iwl_priv *priv) bus_apm_config(bus(priv)); /* Configure analog phase-lock-loop before activating to D0A */ - if (priv->cfg->base_params->pll_cfg_val) + if (cfg(priv)->base_params->pll_cfg_val) iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG, - priv->cfg->base_params->pll_cfg_val); + cfg(priv)->base_params->pll_cfg_val); /* * Set "initialization complete" bit to move adapter from @@ -1120,229 +1103,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) &statistics_cmd); } -int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx; - unsigned long flags; - int q; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - if (!iwl_is_ready_rf(priv->shrd)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return -EIO; - } - if (queue >= AC_NUM) { - IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); - return 0; - } - - q = AC_NUM - 1 - queue; - - spin_lock_irqsave(&priv->shrd->lock, flags); - - /* - * MULTI-FIXME - * This may need to be done per interface in nl80211/cfg80211/mac80211. 
- */ - for_each_context(priv, ctx) { - ctx->qos_data.def_qos_parm.ac[q].cw_min = - cpu_to_le16(params->cw_min); - ctx->qos_data.def_qos_parm.ac[q].cw_max = - cpu_to_le16(params->cw_max); - ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; - ctx->qos_data.def_qos_parm.ac[q].edca_txop = - cpu_to_le16((params->txop * 32)); - - ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; - } - - spin_unlock_irqrestore(&priv->shrd->lock, flags); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - return priv->ibss_manager == IWL_IBSS_MANAGER; -} - -static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - iwl_connection_init_rx_config(priv, ctx); - - iwlagn_set_rxon_chain(priv, ctx); - - return iwlagn_commit_rxon(priv, ctx); -} - -static int iwl_setup_interface(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct ieee80211_vif *vif = ctx->vif; - int err; - - lockdep_assert_held(&priv->shrd->mutex); - - /* - * This variable will be correct only when there's just - * a single context, but all code using it is for hardware - * that supports only one context. - */ - priv->iw_mode = vif->type; - - ctx->is_active = true; - - err = iwl_set_mode(priv, ctx); - if (err) { - if (!ctx->always_active) - ctx->is_active = false; - return err; - } - - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist && - vif->type == NL80211_IFTYPE_ADHOC) { - /* - * pretend to have high BT traffic as long as we - * are operating in IBSS mode, as this will cause - * the rate scaling etc. to behave as intended. - */ - priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; - } - - return 0; -} - -int iwlagn_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *tmp, *ctx = NULL; - int err; - enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); - - IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", - viftype, vif->addr); - - cancel_delayed_work_sync(&priv->hw_roc_disable_work); - - mutex_lock(&priv->shrd->mutex); - - iwlagn_disable_roc(priv); - - if (!iwl_is_ready_rf(priv->shrd)) { - IWL_WARN(priv, "Try to add interface when device not ready\n"); - err = -EINVAL; - goto out; - } - - for_each_context(priv, tmp) { - u32 possible_modes = - tmp->interface_modes | tmp->exclusive_interface_modes; - - if (tmp->vif) { - /* check if this busy context is exclusive */ - if (tmp->exclusive_interface_modes & - BIT(tmp->vif->type)) { - err = -EINVAL; - goto out; - } - continue; - } - - if (!(possible_modes & BIT(viftype))) - continue; - - /* have maybe usable context w/o interface */ - ctx = tmp; - break; - } - - if (!ctx) { - err = -EOPNOTSUPP; - goto out; - } - - vif_priv->ctx = ctx; - ctx->vif = vif; - - err = iwl_setup_interface(priv, ctx); - if (!err) - goto out; - - ctx->vif = NULL; - priv->iw_mode = NL80211_IFTYPE_STATION; - out: - mutex_unlock(&priv->shrd->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return err; -} - -static void iwl_teardown_interface(struct iwl_priv *priv, - struct ieee80211_vif *vif, - bool mode_change) -{ - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - lockdep_assert_held(&priv->shrd->mutex); - - if (priv->scan_vif == vif) { - iwl_scan_cancel_timeout(priv, 200); - iwl_force_scan_end(priv); - } - - if (!mode_change) { - iwl_set_mode(priv, ctx); - if (!ctx->always_active) - ctx->is_active = 
false; - } - - /* - * When removing the IBSS interface, overwrite the - * BT traffic load with the stored one from the last - * notification, if any. If this is a device that - * doesn't implement this, this has no effect since - * both values are the same and zero. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->bt_traffic_load = priv->last_bt_traffic_load; -} - -void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->shrd->mutex); - - if (WARN_ON(ctx->vif != vif)) { - struct iwl_rxon_context *tmp; - IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); - for_each_context(priv, tmp) - IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", - tmp->ctxid, tmp, tmp->vif); - } - ctx->vif = NULL; - - iwl_teardown_interface(priv, vif, false); - - mutex_unlock(&priv->shrd->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - -} #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1649,97 +1411,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external) return 0; } -int iwlagn_mac_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_rxon_context *tmp; - enum nl80211_iftype newviftype = newtype; - u32 interface_modes; - int err; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - newtype = ieee80211_iftype_p2p(newtype, newp2p); - - mutex_lock(&priv->shrd->mutex); - - if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) { - /* - * Huh? But wait ... this can maybe happen when - * we're in the middle of a firmware restart! - */ - err = -EBUSY; - goto out; - } - - interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; - - if (!(interface_modes & BIT(newtype))) { - err = -EBUSY; - goto out; - } - - /* - * Refuse a change that should be done by moving from the PAN - * context to the BSS context instead, if the BSS context is - * available and can support the new interface type. - */ - if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif && - (bss_ctx->interface_modes & BIT(newtype) || - bss_ctx->exclusive_interface_modes & BIT(newtype))) { - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - err = -EBUSY; - goto out; - } - - if (ctx->exclusive_interface_modes & BIT(newtype)) { - for_each_context(priv, tmp) { - if (ctx == tmp) - continue; - - if (!tmp->vif) - continue; - - /* - * The current mode switch would be exclusive, but - * another context is active ... refuse the switch. - */ - err = -EBUSY; - goto out; - } - } - - /* success */ - iwl_teardown_interface(priv, vif, true); - vif->type = newviftype; - vif->p2p = newp2p; - err = iwl_setup_interface(priv, ctx); - WARN_ON(err); - /* - * We've switched internally, but submitting to the - * device may have failed for some reason. Mask this - * error, because otherwise mac80211 will not switch - * (and set the interface type back) and we'll be - * out of sync with it. 
- */ - err = 0; - - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return err; -} int iwl_cmd_echo_test(struct iwl_priv *priv) { int ret; struct iwl_host_cmd cmd = { .id = REPLY_ECHO, + .len = { 0 }, .flags = CMD_SYNC, }; @@ -1783,7 +1461,7 @@ void iwl_bg_watchdog(unsigned long data) if (iwl_is_rfkill(priv->shrd)) return; - timeout = priv->cfg->base_params->wd_timeout; + timeout = cfg(priv)->base_params->wd_timeout; if (timeout == 0) return; @@ -1808,11 +1486,11 @@ void iwl_bg_watchdog(unsigned long data) void iwl_setup_watchdog(struct iwl_priv *priv) { - unsigned int timeout = priv->cfg->base_params->wd_timeout; + unsigned int timeout = cfg(priv)->base_params->wd_timeout; if (!iwlagn_mod_params.wd_disable) { /* use system default */ - if (timeout && !priv->cfg->base_params->wd_disable) + if (timeout && !cfg(priv)->base_params->wd_disable) mod_timer(&priv->watchdog, jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); @@ -1902,34 +1580,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, return cpu_to_le32(res); } -void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv, - enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid) -{ - struct ieee80211_vif *vif; - u8 *addr = priv->stations[sta_id].sta.sta.addr; - - if (ctx == NUM_IWL_RXON_CTX) - ctx = priv->stations[sta_id].ctxid; - vif = priv->contexts[ctx].vif; - - ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); -} - -void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv, - enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid) -{ - struct ieee80211_vif *vif; - u8 *addr = priv->stations[sta_id].sta.sta.addr; - - if (ctx == NUM_IWL_RXON_CTX) - ctx = priv->stations[sta_id].ctxid; - vif = priv->contexts[ctx].vif; - - ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); -} - void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state) { wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); @@ -1937,8 +1587,7 @@ void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state) void iwl_nic_config(struct iwl_priv *priv) { - priv->cfg->lib->nic_config(priv); - + cfg(priv)->lib->nic_config(priv); } void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb) diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index f2fc288f3dd..7bf76ab94dd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -142,8 +142,6 @@ struct iwl_base_params { * @bt_init_traffic_load: specify initial bt traffic load * @bt_prio_boost: default bt priority boost value * @agg_time_limit: maximum number of uSec in aggregation - * @ampdu_factor: Maximum A-MPDU length factor - * @ampdu_density: Minimum A-MPDU spacing * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode */ struct iwl_bt_params { @@ -151,8 +149,6 @@ struct iwl_bt_params { u8 bt_init_traffic_load; u8 bt_prio_boost; u16 agg_time_limit; - u8 ampdu_factor; - u8 ampdu_density; bool bt_sco_disable; bool bt_session_2; }; @@ -165,84 +161,10 @@ struct iwl_ht_params { enum ieee80211_smps_mode smps_mode; }; -/** - * struct iwl_cfg - * @name: Offical name of the device - * @fw_name_pre: Firmware filename prefix. The api version and extension - * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre<api>.ucode. - * @ucode_api_max: Highest version of uCode API supported by driver. 
- * @ucode_api_ok: oldest version of the uCode API that is OK to load - * without a warning, for use in transitions - * @ucode_api_min: Lowest version of uCode API supported by driver. - * @valid_tx_ant: valid transmit antenna - * @valid_rx_ant: valid receive antenna - * @sku: sku information from EEPROM - * @eeprom_ver: EEPROM version - * @eeprom_calib_ver: EEPROM calibration version - * @lib: pointer to the lib ops - * @additional_nic_config: additional nic configuration - * @base_params: pointer to basic parameters - * @ht_params: point to ht patameters - * @bt_params: pointer to bt parameters - * @pa_type: used by 6000 series only to identify the type of Power Amplifier - * @need_dc_calib: need to perform init dc calibration - * @need_temp_offset_calib: need to perform temperature offset calibration - * @scan_antennas: available antenna for scan operation - * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * @adv_pm: advance power management - * @rx_with_siso_diversity: 1x1 device with rx antenna diversity - * @internal_wimax_coex: internal wifi/wimax combo device - * @iq_invert: I/Q inversion - * @temp_offset_v2: support v2 of temperature offset calibration - * - * We enable the driver to be backward compatible wrt API version. The - * driver specifies which APIs it supports (with @ucode_api_max being the - * highest and @ucode_api_min the lowest). Firmware will only be loaded if - * it has a supported API version. - * - * The ideal usage of this infrastructure is to treat a new ucode API - * release as a new hardware revision. - */ -struct iwl_cfg { - /* params specific to an individual device within a device family */ - const char *name; - const char *fw_name_pre; - const unsigned int ucode_api_max; - const unsigned int ucode_api_ok; - const unsigned int ucode_api_min; - u8 valid_tx_ant; - u8 valid_rx_ant; - u16 sku; - u16 eeprom_ver; - u16 eeprom_calib_ver; - const struct iwl_lib_ops *lib; - void (*additional_nic_config)(struct iwl_priv *priv); - /* params not likely to change within a device family */ - struct iwl_base_params *base_params; - /* params likely to change within a device family */ - struct iwl_ht_params *ht_params; - struct iwl_bt_params *bt_params; - enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */ - const bool need_dc_calib; /* if used set to true */ - const bool need_temp_offset_calib; /* if used set to true */ - u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; - enum iwl_led_mode led_mode; - const bool adv_pm; - const bool rx_with_siso_diversity; - const bool internal_wimax_coex; - const bool iq_invert; - const bool temp_offset_v2; -}; - /*************************** * L i b * ***************************/ -int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params); -int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw); void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int hw_decrypt); int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx); @@ -262,13 +184,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, void iwl_connection_init_rx_config(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwl_set_rate(struct iwl_priv *priv); -int iwlagn_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int iwlagn_mac_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool 
newp2p); int iwl_cmd_echo_test(struct iwl_priv *priv); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_alloc_traffic_mem(struct iwl_priv *priv); @@ -325,9 +240,6 @@ void iwl_init_scan_params(struct iwl_priv *priv); int iwl_scan_cancel(struct iwl_priv *priv); void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); void iwl_force_scan_end(struct iwl_priv *priv); -int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req); void iwl_internal_short_hw_scan(struct iwl_priv *priv); int iwl_force_reset(struct iwl_priv *priv, int mode, bool external); u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, @@ -381,8 +293,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode( static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) { - return priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist; + return cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist; } static inline void iwl_enable_rfkill_int(struct iwl_priv *priv) diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index b9f3267e720..fbc3095c7b4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -284,8 +284,8 @@ #define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_2x30 (0x00000C0) #define CSR_HW_REV_TYPE_2x00 (0x0000100) -#define CSR_HW_REV_TYPE_200 (0x0000110) -#define CSR_HW_REV_TYPE_230 (0x0000120) +#define CSR_HW_REV_TYPE_105 (0x0000110) +#define CSR_HW_REV_TYPE_135 (0x0000120) #define CSR_HW_REV_TYPE_NONE (0x00001F0) /* EEPROM REG */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 69a77e24d22..f8fc2393dd4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -47,20 +47,21 @@ do { \ } while (0) #ifdef CONFIG_IWLWIFI_DEBUG -#define IWL_DEBUG(m, level, fmt, args...) \ +#define IWL_DEBUG(m, level, fmt, ...) \ do { \ if (iwl_get_debug_level((m)->shrd) & (level)) \ - dev_printk(KERN_ERR, bus(m)->dev, \ - "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ - __func__ , ## args); \ + dev_err(bus(m)->dev, "%c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __func__, \ + ##__VA_ARGS__); \ } while (0) -#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ +#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \ do { \ - if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\ - dev_printk(KERN_ERR, bus(m)->dev, \ - "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ - __func__ , ## args); \ + if (iwl_get_debug_level((m)->shrd) & (level) && \ + net_ratelimit()) \ + dev_err(bus(m)->dev, "%c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __func__, \ + ##__VA_ARGS__); \ } while (0) #define iwl_print_hex_dump(m, level, p, len) \ @@ -70,10 +71,29 @@ do { \ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ } while (0) +#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \ +do { \ + if (!iwl_is_rfkill(p->shrd)) \ + dev_err(bus(p)->dev, "%s%c %s " fmt, \ + "", \ + in_interrupt() ? 'I' : 'U', __func__, \ + ##__VA_ARGS__); \ + else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \ + dev_err(bus(p)->dev, "%s%c %s " fmt, \ + "(RFKILL) ", \ + in_interrupt() ? 'I' : 'U', __func__, \ + ##__VA_ARGS__); \ +} while (0) + #else #define IWL_DEBUG(m, level, fmt, args...) #define IWL_DEBUG_LIMIT(m, level, fmt, args...) #define iwl_print_hex_dump(m, level, p, len) +#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) 
\ +do { \ + if (!iwl_is_rfkill(p->shrd)) \ + IWL_ERR(p, fmt, ##args); \ +} while (0) #endif /* CONFIG_IWLWIFI_DEBUG */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -114,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) */ /* 0x0000000F - 0x00000001 */ -#define IWL_DL_INFO (1 << 0) -#define IWL_DL_MAC80211 (1 << 1) -#define IWL_DL_HCMD (1 << 2) -#define IWL_DL_STATE (1 << 3) +#define IWL_DL_INFO 0x00000001 +#define IWL_DL_MAC80211 0x00000002 +#define IWL_DL_HCMD 0x00000004 +#define IWL_DL_STATE 0x00000008 /* 0x000000F0 - 0x00000010 */ -#define IWL_DL_MACDUMP (1 << 4) -#define IWL_DL_HCMD_DUMP (1 << 5) -#define IWL_DL_EEPROM (1 << 6) -#define IWL_DL_RADIO (1 << 7) +#define IWL_DL_EEPROM 0x00000040 +#define IWL_DL_RADIO 0x00000080 /* 0x00000F00 - 0x00000100 */ -#define IWL_DL_POWER (1 << 8) -#define IWL_DL_TEMP (1 << 9) -/* reserved (1 << 10) */ -#define IWL_DL_SCAN (1 << 11) +#define IWL_DL_POWER 0x00000100 +#define IWL_DL_TEMP 0x00000200 +#define IWL_DL_SCAN 0x00000800 /* 0x0000F000 - 0x00001000 */ -#define IWL_DL_ASSOC (1 << 12) -#define IWL_DL_DROP (1 << 13) -/* reserved (1 << 14) */ -#define IWL_DL_COEX (1 << 15) +#define IWL_DL_ASSOC 0x00001000 +#define IWL_DL_DROP 0x00002000 +#define IWL_DL_COEX 0x00008000 /* 0x000F0000 - 0x00010000 */ -#define IWL_DL_FW (1 << 16) -#define IWL_DL_RF_KILL (1 << 17) -#define IWL_DL_FW_ERRORS (1 << 18) -#define IWL_DL_LED (1 << 19) +#define IWL_DL_FW 0x00010000 +#define IWL_DL_RF_KILL 0x00020000 +#define IWL_DL_FW_ERRORS 0x00040000 +#define IWL_DL_LED 0x00080000 /* 0x00F00000 - 0x00100000 */ -#define IWL_DL_RATE (1 << 20) -#define IWL_DL_CALIB (1 << 21) -#define IWL_DL_WEP (1 << 22) -#define IWL_DL_TX (1 << 23) +#define IWL_DL_RATE 0x00100000 +#define IWL_DL_CALIB 0x00200000 +#define IWL_DL_WEP 0x00400000 +#define IWL_DL_TX 0x00800000 /* 0x0F000000 - 0x01000000 */ -#define IWL_DL_RX (1 << 24) -#define IWL_DL_ISR (1 << 25) -#define IWL_DL_HT (1 << 26) +#define IWL_DL_RX 0x01000000 +#define IWL_DL_ISR 0x02000000 +#define IWL_DL_HT 0x04000000 /* 0xF0000000 - 0x10000000 */ -#define IWL_DL_11H (1 << 28) -#define IWL_DL_STATS (1 << 29) -#define IWL_DL_TX_REPLY (1 << 30) -#define IWL_DL_QOS (1 << 31) +#define IWL_DL_11H 0x10000000 +#define IWL_DL_STATS 0x20000000 +#define IWL_DL_TX_REPLY 0x40000000 +#define IWL_DL_TX_QUEUES 0x80000000 #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) -#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) #define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) @@ -164,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) -#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) #define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) #define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) #define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) @@ -186,9 +200,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) #define IWL_DEBUG_TX_REPLY(p, f, a...) 
IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) -#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) -#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) #define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index a1670e3f8bf..04a3343f461 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + struct iwl_trans *trans = trans(priv); priv->dbgfs_sram_offset = 0x800000; - if (priv->ucode_type == IWL_UCODE_INIT) - priv->dbgfs_sram_len = priv->ucode_init.data.len; + if (trans->shrd->ucode_type == IWL_UCODE_INIT) + priv->dbgfs_sram_len = trans->ucode_init.data.len; else - priv->dbgfs_sram_len = priv->ucode_rt.data.len; + priv->dbgfs_sram_len = trans->ucode_rt.data.len; } len = priv->dbgfs_sram_len; @@ -341,7 +342,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, priv->wowlan_sram, - priv->ucode_wowlan.data.len); + trans(priv)->ucode_wowlan.data.len); } static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -371,15 +372,13 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, i, station->sta.sta.addr, station->sta.station_flags_msk); pos += scnprintf(buf + pos, bufsz - pos, - "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n"); + "TID\tseq_num\trate_n_flags\n"); for (j = 0; j < IWL_MAX_TID_COUNT; j++) { - tid_data = &priv->shrd->tid_data[i][j]; + tid_data = &priv->tid_data[i][j]; pos += scnprintf(buf + pos, bufsz - pos, - "%d:\t%#x\t%#x\t%u\t%#x", + "%d:\t%#x\t%#x", j, tid_data->seq_number, - tid_data->agg.txq_id, - tid_data->tfds_in_queue, tid_data->agg.rate_n_flags); if (tid_data->agg.wait_for_ba) @@ -407,7 +406,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, const u8 *ptr; char *buf; u16 eeprom_ver; - size_t eeprom_len = priv->cfg->base_params->eeprom_size; + size_t eeprom_len = cfg(priv)->base_params->eeprom_size; buf_size = 4 * eeprom_len + 256; if (eeprom_len % 16) { @@ -415,7 +414,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, return -ENODATA; } - ptr = priv->eeprom; + ptr = priv->shrd->eeprom; if (!ptr) { IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); return -ENOMEM; @@ -427,10 +426,10 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, IWL_ERR(priv, "Can not allocate Buffer\n"); return -ENOMEM; } - eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " "version: 0x%x\n", - (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? 
"OTP" : "EEPROM", eeprom_ver); for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); @@ -1541,15 +1540,15 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { pos += scnprintf(buf + pos, bufsz - pos, "tx power: (1/2 dB step)\n"); - if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a) + if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna A:", tx->tx_power.ant_a); - if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b) + if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna B:", tx->tx_power.ant_b); - if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c) + if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna C:", tx->tx_power.ant_c); @@ -2220,7 +2219,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, const size_t bufsz = sizeof(buf); pos += scnprintf(buf + pos, bufsz - pos, "%u\n", - priv->cfg->base_params->plcp_delta_threshold); + cfg(priv)->base_params->plcp_delta_threshold); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -2242,10 +2241,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, return -EINVAL; if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) - priv->cfg->base_params->plcp_delta_threshold = + cfg(priv)->base_params->plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; else - priv->cfg->base_params->plcp_delta_threshold = plcp; + cfg(priv)->base_params->plcp_delta_threshold = plcp; return count; } @@ -2347,7 +2346,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file, if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) timeout = IWL_DEF_WD_TIMEOUT; - priv->cfg->base_params->wd_timeout = timeout; + cfg(priv)->base_params->wd_timeout = timeout; iwl_setup_watchdog(priv); return count; } @@ -2407,10 +2406,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, char buf[40]; const size_t bufsz = sizeof(buf); - if (priv->cfg->ht_params) + if (cfg(priv)->ht_params) pos += scnprintf(buf + pos, bufsz - pos, "use %s for aggregation\n", - (priv->cfg->ht_params->use_rts_for_aggregation) ? + (cfg(priv)->ht_params->use_rts_for_aggregation) ? "rts/cts" : "cts-to-self"); else pos += scnprintf(buf + pos, bufsz - pos, "N/A"); @@ -2427,7 +2426,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, int buf_size; int rts; - if (!priv->cfg->ht_params) + if (!cfg(priv)->ht_params) return -EINVAL; memset(buf, 0, sizeof(buf)); @@ -2437,9 +2436,9 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, if (sscanf(buf, "%d", &rts) != 1) return -EINVAL; if (rts) - priv->cfg->ht_params->use_rts_for_aggregation = true; + cfg(priv)->ht_params->use_rts_for_aggregation = true; else - priv->cfg->ht_params->use_rts_for_aggregation = false; + cfg(priv)->ht_params->use_rts_for_aggregation = false; return count; } diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 6c00a447963..e54a4d11e58 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -60,11 +60,10 @@ struct iwl_tx_queue; /* Default noise level to report when noise measurement is not available. 
* This may be because we're: - * 1) Not associated (4965, no beacon statistics being sent to driver) + * 1) Not associated no beacon statistics being sent to driver) * 2) Scanning (noise measurement does not apply to associated channel) - * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user. * Also, -127 works better than 0 when averaging frames with/without * noise info (e.g. averaging might be done in app); measured dBm values are * always negative ... using a negative value as the default keeps all @@ -190,6 +189,69 @@ struct iwl_qos_info { struct iwl_qosparam_cmd def_qos_parm; }; +/** + * enum iwl_agg_state + * + * The state machine of the BA agreement establishment / tear down. + * These states relate to a specific RA / TID. + * + * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_ON: aggregation session is up + * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + */ +enum iwl_agg_state { + IWL_AGG_OFF = 0, + IWL_AGG_ON, + IWL_EMPTYING_HW_QUEUE_ADDBA, + IWL_EMPTYING_HW_QUEUE_DELBA, +}; + +/** + * struct iwl_ht_agg - aggregation state machine + + * This structs holds the states for the BA agreement establishment and tear + * down. It also holds the state during the BA session itself. This struct is + * duplicated for each RA / TID. + + * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the + * Tx response (REPLY_TX), and the block ack notification + * (REPLY_COMPRESSED_BA). + * @state: state of the BA agreement establishment / tear down. + * @txq_id: Tx queue used by the BA session - used by the transport layer. + * Needed by the upper layer for debugfs only. + * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or + * the first packet to be sent in legacy HW queue in Tx AGG stop flow. + * Basically when next_reclaimed reaches ssn, we can tell mac80211 that + * we are ready to finish the Tx AGG stop / start flow. + * @wait_for_ba: Expect block-ack before next Tx reply + */ +struct iwl_ht_agg { + u32 rate_n_flags; + enum iwl_agg_state state; + u16 txq_id; + u16 ssn; + bool wait_for_ba; +}; + +/** + * struct iwl_tid_data - one for each RA / TID + + * This structs holds the states for each RA / TID. + + * @seq_number: the next WiFi sequence number to use + * @next_reclaimed: the WiFi sequence number of the next packet to be acked. + * This is basically (last acked packet++). + * @agg: aggregation state machine + */ +struct iwl_tid_data { + u16 seq_number; + u16 next_reclaimed; + struct iwl_ht_agg agg; +}; + /* * Structure should be accessed with sta_lock held. 
When station addition * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only @@ -230,17 +292,6 @@ struct iwl_vif_priv { u8 ibss_bssid_sta_id; }; -/* one for each uCode image (inst/data, boot/init/runtime) */ -struct fw_desc { - void *v_addr; /* access by driver */ - dma_addr_t p_addr; /* access by card's busmaster DMA */ - u32 len; /* bytes */ -}; - -struct fw_img { - struct fw_desc code, data; -}; - /* v1/v2 uCode file layout */ struct iwl_ucode_header { __le32 ver; /* major/minor/API/serial */ @@ -452,29 +503,6 @@ enum iwlagn_chain_noise_state { IWL_CHAIN_NOISE_DONE, }; - -/* - * enum iwl_calib - * defines the order in which results of initial calibrations - * should be sent to the runtime uCode - */ -enum iwl_calib { - IWL_CALIB_XTAL, - IWL_CALIB_DC, - IWL_CALIB_LO, - IWL_CALIB_TX_IQ, - IWL_CALIB_TX_IQ_PERD, - IWL_CALIB_BASE_BAND, - IWL_CALIB_TEMP_OFFSET, - IWL_CALIB_MAX -}; - -/* Opaque calibration results */ -struct iwl_calib_result { - void *buf; - size_t buf_len; -}; - /* Sensitivity calib data */ struct iwl_sensitivity_data { u32 auto_corr_ofdm; @@ -547,16 +575,6 @@ enum iwl_access_mode { IWL_OTP_ACCESS_RELATIVE, }; -/** - * enum iwl_pa_type - Power Amplifier type - * @IWL_PA_SYSTEM: based on uCode configuration - * @IWL_PA_INTERNAL: use Internal only - */ -enum iwl_pa_type { - IWL_PA_SYSTEM = 0, - IWL_PA_INTERNAL = 1, -}; - /* reply_tx_statistics (for _agn devices) */ struct reply_tx_error_statistics { u32 pp_delay; @@ -714,35 +732,6 @@ struct iwl_force_reset { */ #define IWLAGN_EXT_BEACON_TIME_POS 22 -/** - * struct iwl_notification_wait - notification wait entry - * @list: list head for global list - * @fn: function called with the notification - * @cmd: command ID - * - * This structure is not used directly, to wait for a - * notification declare it on the stack, and call - * iwlagn_init_notification_wait() with appropriate - * parameters. Then do whatever will cause the ucode - * to notify the driver, and to wait for that then - * call iwlagn_wait_notification(). - * - * Each notification is one-shot. If at some point we - * need to support multi-shot notifications (which - * can't be allocated on the stack) we need to modify - * the code for them. 
- */ -struct iwl_notification_wait { - struct list_head list; - - void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt, - void *data); - void *fn_data; - - u8 cmd; - bool triggered, aborted; -}; - struct iwl_rxon_context { struct ieee80211_vif *vif; @@ -805,14 +794,7 @@ enum iwl_scan_type { IWL_SCAN_ROC, }; -enum iwlagn_ucode_type { - IWL_UCODE_NONE, - IWL_UCODE_REGULAR, - IWL_UCODE_INIT, - IWL_UCODE_WOWLAN, -}; - -#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE struct iwl_testmode_trace { u32 buff_size; u32 total_size; @@ -822,8 +804,20 @@ struct iwl_testmode_trace { dma_addr_t dma_addr; bool trace_enabled; }; +struct iwl_testmode_sram { + u32 buff_size; + u32 num_chunks; + u8 *buff_addr; + bool sram_readed; +}; #endif +struct iwl_wipan_noa_data { + struct rcu_head rcu_head; + u32 length; + u8 data[]; +}; + struct iwl_priv { /*data shared among all the driver's layers */ @@ -835,7 +829,6 @@ struct iwl_priv { struct ieee80211_channel *ieee_channels; struct ieee80211_rate *ieee_rates; struct kmem_cache *tx_cmd_pool; - struct iwl_cfg *cfg; enum ieee80211_band band; @@ -880,8 +873,7 @@ struct iwl_priv { s32 temperature; /* Celsius */ s32 last_temperature; - /* init calibration results */ - struct iwl_calib_result calib_results[IWL_CALIB_MAX]; + struct iwl_wipan_noa_data __rcu *noa_data; /* Scan related variables */ unsigned long scan_start; @@ -907,23 +899,12 @@ struct iwl_priv { u32 ucode_ver; /* version of ucode, copy of iwl_ucode.ver */ - struct fw_img ucode_rt; - struct fw_img ucode_init; - struct fw_img ucode_wowlan; - - enum iwlagn_ucode_type ucode_type; - u8 ucode_write_complete; /* the image write is complete */ char firmware_name[25]; struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; __le16 switch_channel; - struct { - u32 error_event_table; - u32 log_event_table; - } device_pointers; - u16 active_rate; u8 start_calib; @@ -951,17 +932,13 @@ struct iwl_priv { int num_stations; struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; unsigned long ucode_key_table; + struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; u8 mac80211_registered; /* Indication if ieee80211_ops->open has been called */ u8 is_open; - /* eeprom -- this is in the card's little endian byte order */ - u8 *eeprom; - int nvm_device_type; - struct iwl_eeprom_calib_info *calib_info; - enum nl80211_iftype iw_mode; /* Last Rx'd beacon timestamp */ @@ -1017,10 +994,6 @@ struct iwl_priv { /* counts reply_tx error */ struct reply_tx_error_statistics reply_tx_stats; struct reply_agg_tx_error_statistics reply_agg_tx_stats; - /* notification wait support */ - struct list_head notif_waits; - spinlock_t notif_wait_lock; - wait_queue_head_t notif_waitq; /* remain-on-channel offload support */ struct ieee80211_channel *hw_roc_channel; @@ -1098,8 +1071,9 @@ struct iwl_priv { struct led_classdev led; unsigned long blink_on, blink_off; bool led_registered; -#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE struct iwl_testmode_trace testmode_trace; + struct iwl_testmode_sram testmode_sram; u32 tm_fixed_rate; #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index a635a7e7544..2a2c8de64a0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -28,7 +28,7 @@ /* sparse doesn't like tracepoint macros */ #ifndef __CHECKER__ -#include "iwl-dev.h" +#include "iwl-trans.h" #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" diff --git 
a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 8a51c5ccda1..9b212a8f30b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -29,7 +29,6 @@ #include <linux/tracepoint.h> -struct iwl_priv; #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) #undef TRACE_EVENT @@ -37,14 +36,14 @@ struct iwl_priv; static inline void trace_ ## name(proto) {} #endif -#define PRIV_ENTRY __field(struct iwl_priv *, priv) +#define PRIV_ENTRY __field(void *, priv) #define PRIV_ASSIGN __entry->priv = priv #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_io TRACE_EVENT(iwlwifi_dev_ioread32, - TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_PROTO(void *priv, u32 offs, u32 val), TP_ARGS(priv, offs, val), TP_STRUCT__entry( PRIV_ENTRY @@ -60,7 +59,7 @@ TRACE_EVENT(iwlwifi_dev_ioread32, ); TRACE_EVENT(iwlwifi_dev_iowrite8, - TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), + TP_PROTO(void *priv, u32 offs, u8 val), TP_ARGS(priv, offs, val), TP_STRUCT__entry( PRIV_ENTRY @@ -76,7 +75,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite8, ); TRACE_EVENT(iwlwifi_dev_iowrite32, - TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_PROTO(void *priv, u32 offs, u32 val), TP_ARGS(priv, offs, val), TP_STRUCT__entry( PRIV_ENTRY @@ -91,11 +90,40 @@ TRACE_EVENT(iwlwifi_dev_iowrite32, TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val) ); +TRACE_EVENT(iwlwifi_dev_irq, + TP_PROTO(void *priv), + TP_ARGS(priv), + TP_STRUCT__entry( + PRIV_ENTRY + ), + TP_fast_assign( + PRIV_ASSIGN; + ), + /* TP_printk("") doesn't compile */ + TP_printk("%d", 0) +); + +TRACE_EVENT(iwlwifi_dev_ict_read, + TP_PROTO(void *priv, u32 index, u32 value), + TP_ARGS(priv, index, value), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, index) + __field(u32, value) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->index = index; + __entry->value = value; + ), + TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value) +); + #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_ucode TRACE_EVENT(iwlwifi_dev_ucode_cont_event, - TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_PROTO(void *priv, u32 time, u32 data, u32 ev), TP_ARGS(priv, time, data, ev), TP_STRUCT__entry( PRIV_ENTRY @@ -115,7 +143,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_cont_event, ); TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, - TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry), + TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry), TP_ARGS(priv, wraps, n_entry, p_entry), TP_STRUCT__entry( PRIV_ENTRY @@ -139,7 +167,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, #define TRACE_SYSTEM iwlwifi TRACE_EVENT(iwlwifi_dev_hcmd, - TP_PROTO(struct iwl_priv *priv, u32 flags, + TP_PROTO(void *priv, u32 flags, const void *hcmd0, size_t len0, const void *hcmd1, size_t len1, const void *hcmd2, size_t len2), @@ -164,7 +192,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, ); TRACE_EVENT(iwlwifi_dev_rx, - TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), + TP_PROTO(void *priv, void *rxbuf, size_t len), TP_ARGS(priv, rxbuf, len), TP_STRUCT__entry( PRIV_ENTRY @@ -179,7 +207,7 @@ TRACE_EVENT(iwlwifi_dev_rx, ); TRACE_EVENT(iwlwifi_dev_tx, - TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, + TP_PROTO(void *priv, void *tfd, size_t tfdlen, void *buf0, size_t buf0_len, void *buf1, size_t buf1_len), TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), @@ -211,7 +239,7 @@ TRACE_EVENT(iwlwifi_dev_tx, ); 
TRACE_EVENT(iwlwifi_dev_ucode_error, - TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low, + TP_PROTO(void *priv, u32 desc, u32 tsf_low, u32 data1, u32 data2, u32 line, u32 blink1, u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver, @@ -271,7 +299,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, ); TRACE_EVENT(iwlwifi_dev_ucode_event, - TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_PROTO(void *priv, u32 time, u32 data, u32 ev), TP_ARGS(priv, time, data, ev), TP_STRUCT__entry( PRIV_ENTRY diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index a4e43bd4a54..c1eda9724f4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ * EEPROM chip, not a single event, so even reads could conflict if they * weren't arbitrated by the semaphore. */ -static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv) +static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus) { u16 count; int ret; for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { /* Request semaphore */ - iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG, + iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); /* See if we got it */ - ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG, + ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, EEPROM_SEM_TIMEOUT); if (ret >= 0) { - IWL_DEBUG_EEPROM(priv, + IWL_DEBUG_EEPROM(bus, "Acquired semaphore after %d tries.\n", count+1); return ret; @@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv) return ret; } -static void iwl_eeprom_release_semaphore(struct iwl_priv *priv) +static void iwl_eeprom_release_semaphore(struct iwl_bus *bus) { - iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG, + iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); } -static int iwl_eeprom_verify_signature(struct iwl_priv *priv) +static int iwl_eeprom_verify_signature(struct iwl_trans *trans) { - u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; int ret = 0; - IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); + IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp); switch (gp) { case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: - if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) { - IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n", + if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) { + IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", gp); ret = -ENOENT; } break; case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { - IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp); + if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { + IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); ret = -ENOENT; } break; case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: default: - IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, " + IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, " "EEPROM_GP=0x%08x\n", - (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? 
"OTP" : "EEPROM", gp); ret = -ENOENT; break; @@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv) return ret; } -u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) +u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset) { - if (!priv->eeprom) + if (!shrd->eeprom) return 0; - return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); + return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8); } int iwl_eeprom_check_version(struct iwl_priv *priv) @@ -227,11 +227,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) u16 eeprom_ver; u16 calib_ver; - eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); - calib_ver = iwlagn_eeprom_calib_version(priv); + eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); + calib_ver = iwl_eeprom_calib_version(priv->shrd); - if (eeprom_ver < priv->cfg->eeprom_ver || - calib_ver < priv->cfg->eeprom_calib_ver) + if (eeprom_ver < cfg(priv)->eeprom_ver || + calib_ver < cfg(priv)->eeprom_calib_ver) goto err; IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", @@ -241,45 +241,46 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) err: IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " "CALIB=0x%x < 0x%x\n", - eeprom_ver, priv->cfg->eeprom_ver, - calib_ver, priv->cfg->eeprom_calib_ver); + eeprom_ver, cfg(priv)->eeprom_ver, + calib_ver, cfg(priv)->eeprom_calib_ver); return -EINVAL; } int iwl_eeprom_check_sku(struct iwl_priv *priv) { + struct iwl_shared *shrd = priv->shrd; u16 radio_cfg; - if (!priv->cfg->sku) { + if (!cfg(priv)->sku) { /* not using sku overwrite */ - priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); - if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE && - !priv->cfg->ht_params) { + cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP); + if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE && + !cfg(priv)->ht_params) { IWL_ERR(priv, "Invalid 11n configuration\n"); return -EINVAL; } } - if (!priv->cfg->sku) { + if (!cfg(priv)->sku) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; } - IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku); + IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku); - if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) { + if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) { /* not using .cfg overwrite */ - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); - priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); - if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { - IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", - priv->cfg->valid_tx_ant, - priv->cfg->valid_rx_ant); + radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG); + cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); + cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); + if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) { + IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n", + cfg(priv)->valid_tx_ant, + cfg(priv)->valid_rx_ant); return -EINVAL; } - IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n", - priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant); + IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", + cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant); } /* * for some special cases, @@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv) return 0; } -void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) +void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac) { - 
const u8 *addr = iwl_eeprom_query_addr(priv, + const u8 *addr = iwl_eeprom_query_addr(shrd, EEPROM_MAC_ADDRESS); memcpy(mac, addr, ETH_ALEN); } @@ -302,19 +303,19 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) * ******************************************************************************/ -static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) +static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode) { - iwl_read32(bus(priv), CSR_OTP_GP_REG); + iwl_read32(bus, CSR_OTP_GP_REG); if (mode == IWL_OTP_ACCESS_ABSOLUTE) - iwl_clear_bit(bus(priv), CSR_OTP_GP_REG, + iwl_clear_bit(bus, CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE); else - iwl_set_bit(bus(priv), CSR_OTP_GP_REG, + iwl_set_bit(bus, CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE); } -static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) +static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev) { u32 otpgp; int nvm_type; @@ -322,7 +323,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) /* OTP only valid for CP/PP and after */ switch (hw_rev & CSR_HW_REV_TYPE_MSK) { case CSR_HW_REV_TYPE_NONE: - IWL_ERR(priv, "Unknown hardware type\n"); + IWL_ERR(bus, "Unknown hardware type\n"); return -ENOENT; case CSR_HW_REV_TYPE_5300: case CSR_HW_REV_TYPE_5350: @@ -331,7 +332,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) nvm_type = NVM_DEVICE_TYPE_EEPROM; break; default: - otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG); + otpgp = iwl_read32(bus, CSR_OTP_GP_REG); if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) nvm_type = NVM_DEVICE_TYPE_OTP; else @@ -341,73 +342,73 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) return nvm_type; } -static int iwl_init_otp_access(struct iwl_priv *priv) +static int iwl_init_otp_access(struct iwl_bus *bus) { int ret; /* Enable 40MHz radio clock */ - iwl_write32(bus(priv), CSR_GP_CNTRL, - iwl_read32(bus(priv), CSR_GP_CNTRL) | + iwl_write32(bus, CSR_GP_CNTRL, + iwl_read32(bus, CSR_GP_CNTRL) | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); /* wait for clock to be ready */ - ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL, + ret = iwl_poll_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); if (ret < 0) - IWL_ERR(priv, "Time out access OTP\n"); + IWL_ERR(bus, "Time out access OTP\n"); else { - iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG, + iwl_set_bits_prph(bus, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); udelay(5); - iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG, + iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); /* * CSR auto clock gate disable bit - * this is only applicable for HW with OTP shadow RAM */ - if (priv->cfg->base_params->shadow_ram_support) - iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG, + if (cfg(bus)->base_params->shadow_ram_support) + iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); } return ret; } -static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data) +static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data) { int ret = 0; u32 r; u32 otpgp; - iwl_write32(bus(priv), CSR_EEPROM_REG, + iwl_write32(bus, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG, + ret = iwl_poll_bit(bus, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK, CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT); if (ret < 0) { - IWL_ERR(priv, "Time out reading OTP[%d]\n", addr); + IWL_ERR(bus, "Time out 
reading OTP[%d]\n", addr); return ret; } - r = iwl_read32(bus(priv), CSR_EEPROM_REG); + r = iwl_read32(bus, CSR_EEPROM_REG); /* check for ECC errors: */ - otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG); + otpgp = iwl_read32(bus, CSR_OTP_GP_REG); if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { /* stop in this case */ /* set the uncorrectable OTP ECC bit for acknowledgement */ - iwl_set_bit(bus(priv), CSR_OTP_GP_REG, + iwl_set_bit(bus, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n"); + IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n"); return -EINVAL; } if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { /* continue in this case */ /* set the correctable OTP ECC bit for acknowledgement */ - iwl_set_bit(bus(priv), CSR_OTP_GP_REG, + iwl_set_bit(bus, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); - IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); + IWL_ERR(bus, "Correctable OTP ECC error, continue read\n"); } *eeprom_data = cpu_to_le16(r >> 16); return 0; @@ -416,20 +417,20 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat /* * iwl_is_otp_empty: check for empty OTP */ -static bool iwl_is_otp_empty(struct iwl_priv *priv) +static bool iwl_is_otp_empty(struct iwl_bus *bus) { u16 next_link_addr = 0; __le16 link_value; bool is_empty = false; /* locate the beginning of OTP link list */ - if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) { + if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) { if (!link_value) { - IWL_ERR(priv, "OTP is empty\n"); + IWL_ERR(bus, "OTP is empty\n"); is_empty = true; } } else { - IWL_ERR(priv, "Unable to read first block of OTP list.\n"); + IWL_ERR(bus, "Unable to read first block of OTP list.\n"); is_empty = true; } @@ -446,7 +447,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv) * we should read and used to configure the device. * only perform this operation if shadow RAM is disabled */ -static int iwl_find_otp_image(struct iwl_priv *priv, +static int iwl_find_otp_image(struct iwl_bus *bus, u16 *validblockaddr) { u16 next_link_addr = 0, valid_addr; @@ -454,10 +455,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv, int usedblocks = 0; /* set addressing mode to absolute to traverse the link list */ - iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE); + iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE); /* checking for empty OTP or error */ - if (iwl_is_otp_empty(priv)) + if (iwl_is_otp_empty(bus)) return -EINVAL; /* @@ -471,9 +472,9 @@ static int iwl_find_otp_image(struct iwl_priv *priv, */ valid_addr = next_link_addr; next_link_addr = le16_to_cpu(link_value) * sizeof(u16); - IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n", + IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr); - if (iwl_read_otp_word(priv, next_link_addr, &link_value)) + if (iwl_read_otp_word(bus, next_link_addr, &link_value)) return -EINVAL; if (!link_value) { /* @@ -488,10 +489,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv, } /* more in the link list, continue */ usedblocks++; - } while (usedblocks <= priv->cfg->base_params->max_ll_items); + } while (usedblocks <= cfg(bus)->base_params->max_ll_items); /* OTP has no valid blocks */ - IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n"); + IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n"); return -EINVAL; } @@ -504,28 +505,28 @@ static int iwl_find_otp_image(struct iwl_priv *priv, * iwl_get_max_txpower_avg - get the highest tx power from all chains. 
* find the highest tx power from all chains for the channel */ -static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, +static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg, struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, int element, s8 *max_txpower_in_half_dbm) { s8 max_txpower_avg = 0; /* (dBm) */ /* Take the highest tx power from any valid chains */ - if ((priv->cfg->valid_tx_ant & ANT_A) && + if ((cfg->valid_tx_ant & ANT_A) && (enhanced_txpower[element].chain_a_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_a_max; - if ((priv->cfg->valid_tx_ant & ANT_B) && + if ((cfg->valid_tx_ant & ANT_B) && (enhanced_txpower[element].chain_b_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_b_max; - if ((priv->cfg->valid_tx_ant & ANT_C) && + if ((cfg->valid_tx_ant & ANT_C) && (enhanced_txpower[element].chain_c_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_c_max; - if (((priv->cfg->valid_tx_ant == ANT_AB) | - (priv->cfg->valid_tx_ant == ANT_BC) | - (priv->cfg->valid_tx_ant == ANT_AC)) && + if (((cfg->valid_tx_ant == ANT_AB) | + (cfg->valid_tx_ant == ANT_BC) | + (cfg->valid_tx_ant == ANT_AC)) && (enhanced_txpower[element].mimo2_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].mimo2_max; - if ((priv->cfg->valid_tx_ant == ANT_ABC) && + if ((cfg->valid_tx_ant == ANT_ABC) && (enhanced_txpower[element].mimo3_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].mimo3_max; @@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv, void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) { + struct iwl_shared *shrd = priv->shrd; struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; int idx, entries; __le16 *txp_len; @@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ - txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); + txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; - txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS); + txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; @@ -627,7 +629,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) ((txp->delta_20_in_40 & 0xf0) >> 4), (txp->delta_20_in_40 & 0x0f)); - max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, + max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx, &max_txp_avg_halfdbm); /* @@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) /** * iwl_eeprom_init - read EEPROM contents * - * Load the EEPROM contents from adapter into priv->eeprom + * Load the EEPROM contents from adapter into shrd->eeprom * * NOTE: This routine uses the non-debug IO access functions. 
*/ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) { + struct iwl_shared *shrd = priv->shrd; __le16 *e; u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP); int sz; @@ -660,22 +663,22 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) u16 validblockaddr = 0; u16 cache_addr = 0; - priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev); - if (priv->nvm_device_type == -ENOENT) + trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev); + if (trans(priv)->nvm_device_type == -ENOENT) return -ENOENT; /* allocate eeprom */ - sz = priv->cfg->base_params->eeprom_size; + sz = cfg(priv)->base_params->eeprom_size; IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); - priv->eeprom = kzalloc(sz, GFP_KERNEL); - if (!priv->eeprom) { + shrd->eeprom = kzalloc(sz, GFP_KERNEL); + if (!shrd->eeprom) { ret = -ENOMEM; goto alloc_err; } - e = (__le16 *)priv->eeprom; + e = (__le16 *)shrd->eeprom; iwl_apm_init(priv); - ret = iwl_eeprom_verify_signature(priv); + ret = iwl_eeprom_verify_signature(trans(priv)); if (ret < 0) { IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); ret = -ENOENT; @@ -683,16 +686,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) } /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = iwl_eeprom_acquire_semaphore(priv); + ret = iwl_eeprom_acquire_semaphore(bus(priv)); if (ret < 0) { IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); ret = -ENOENT; goto err; } - if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { + if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) { - ret = iwl_init_otp_access(priv); + ret = iwl_init_otp_access(bus(priv)); if (ret) { IWL_ERR(priv, "Failed to initialize OTP access.\n"); ret = -ENOENT; @@ -706,8 +709,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); /* traversing the linked list if no shadow ram supported */ - if (!priv->cfg->base_params->shadow_ram_support) { - if (iwl_find_otp_image(priv, &validblockaddr)) { + if (!cfg(priv)->base_params->shadow_ram_support) { + if (iwl_find_otp_image(bus(priv), &validblockaddr)) { ret = -ENOENT; goto done; } @@ -716,7 +719,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) addr += sizeof(u16)) { __le16 eeprom_data; - ret = iwl_read_otp_word(priv, addr, &eeprom_data); + ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data); if (ret) goto done; e[cache_addr / 2] = eeprom_data; @@ -744,27 +747,27 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) } IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", - (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? "OTP" : "EEPROM", - iwl_eeprom_query16(priv, EEPROM_VERSION)); + iwl_eeprom_query16(shrd, EEPROM_VERSION)); ret = 0; done: - iwl_eeprom_release_semaphore(priv); + iwl_eeprom_release_semaphore(bus(priv)); err: if (ret) - iwl_eeprom_free(priv); + iwl_eeprom_free(priv->shrd); /* Reset chip to save power until we load uCode during "up". 
*/ iwl_apm_stop(priv); alloc_err: return ret; } -void iwl_eeprom_free(struct iwl_priv *priv) +void iwl_eeprom_free(struct iwl_shared *shrd) { - kfree(priv->eeprom); - priv->eeprom = NULL; + kfree(shrd->eeprom); + shrd->eeprom = NULL; } static void iwl_init_band_reference(const struct iwl_priv *priv, @@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv, const struct iwl_eeprom_channel **eeprom_ch_info, const u8 **eeprom_ch_index) { - u32 offset = priv->cfg->lib-> + struct iwl_shared *shrd = priv->shrd; + u32 offset = cfg(priv)->lib-> eeprom_ops.regulatory_bands[eep_band - 1]; switch (eep_band) { case 1: /* 2.4GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_1; break; case 2: /* 4.9GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_2; break; case 3: /* 5.2GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_3; break; case 4: /* 5.5GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_4; break; case 5: /* 5.7GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_5; break; case 6: /* 2.4GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_6; break; case 7: /* 5 GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(priv, offset); + iwl_eeprom_query_addr(shrd, offset); *eeprom_ch_index = iwl_eeprom_band_7; break; default: @@ -979,9 +983,9 @@ int iwl_init_channel_map(struct iwl_priv *priv) } /* Check if we do have HT40 channels */ - if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] == + if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && - priv->cfg->lib->eeprom_ops.regulatory_bands[6] == + cfg(priv)->lib->eeprom_ops.regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) return 0; @@ -1017,8 +1021,8 @@ int iwl_init_channel_map(struct iwl_priv *priv) * driver need to process addition information * to determine the max channel tx power limits */ - if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower) - priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv); + if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower) + cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv); return 0; } @@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv) { u16 radio_cfg; - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG); /* write radio config values to register */ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { diff --git 
a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index c94747e7299..9fa937ec35e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -66,6 +66,7 @@ #include <net/mac80211.h> struct iwl_priv; +struct iwl_shared; /* * EEPROM access time values: @@ -305,11 +306,11 @@ struct iwl_eeprom_ops { int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev); -void iwl_eeprom_free(struct iwl_priv *priv); +void iwl_eeprom_free(struct iwl_shared *shrd); int iwl_eeprom_check_version(struct iwl_priv *priv); int iwl_eeprom_check_sku(struct iwl_priv *priv); -const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); -u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); +const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset); +u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset); int iwl_init_channel_map(struct iwl_priv *priv); void iwl_free_channel_map(struct iwl_priv *priv); const struct iwl_channel_info *iwl_get_channel_info( diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c index 3ffa8e62b85..d57ea6484bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/iwlwifi/iwl-io.c @@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg) spin_lock_irqsave(&bus->reg_lock, flags); iwl_grab_nic_access(bus); - value = iwl_read32(bus(bus), reg); + value = iwl_read32(bus, reg); iwl_release_nic_access(bus); spin_unlock_irqrestore(&bus->reg_lock, flags); @@ -283,16 +283,29 @@ u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr) return value; } -void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val) +int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr, + void *buf, int words) { unsigned long flags; + int offs, result = 0; + u32 *vals = buf; spin_lock_irqsave(&bus->reg_lock, flags); if (!iwl_grab_nic_access(bus)) { iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr); wmb(); - iwl_write32(bus, HBUS_TARG_MEM_WDAT, val); + + for (offs = 0; offs < words; offs++) + iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]); iwl_release_nic_access(bus); - } + } else + result = -EBUSY; spin_unlock_irqrestore(&bus->reg_lock, flags); + + return result; +} + +int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val) +{ + return _iwl_write_targ_mem_words(bus, addr, &val, 1); } diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index ced2cbeb6ea..aae2eeb331a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -85,6 +85,9 @@ void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr, (bufsize) / sizeof(u32));\ } while (0) +int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr, + void *buf, int words); + u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr); -void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val); +int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val); #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index eb541735296..14dcbfcdc0f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c @@ -137,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv, } IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", - priv->cfg->base_params->led_compensation); + cfg(priv)->base_params->led_compensation); led_cmd.on = iwl_blink_compensation(priv, on, - 
priv->cfg->base_params->led_compensation); + cfg(priv)->base_params->led_compensation); led_cmd.off = iwl_blink_compensation(priv, off, - priv->cfg->base_params->led_compensation); + cfg(priv)->base_params->led_compensation); ret = iwl_send_led_cmd(priv, &led_cmd); if (!ret) { @@ -178,7 +178,7 @@ void iwl_leds_init(struct iwl_priv *priv) int ret; if (mode == IWL_LED_DEFAULT) - mode = priv->cfg->led_mode; + mode = cfg(priv)->led_mode; priv->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(priv->hw->wiphy)); diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h index 1c93dfef693..2550b3c7dcb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.h +++ b/drivers/net/wireless/iwlwifi/iwl-led.h @@ -36,20 +36,6 @@ struct iwl_priv; #define IWL_LED_ACTIVITY (0<<1) #define IWL_LED_LINK (1<<1) -/* - * LED mode - * IWL_LED_DEFAULT: use device default - * IWL_LED_RF_STATE: turn LED on/off based on RF state - * LED ON = RF ON - * LED OFF = RF OFF - * IWL_LED_BLINK: adjust led blink rate based on blink table - */ -enum iwl_led_mode { - IWL_LED_DEFAULT, - IWL_LED_RF_STATE, - IWL_LED_BLINK, -}; - void iwlagn_led_enable(struct iwl_priv *priv); void iwl_leds_init(struct iwl_priv *priv); void iwl_leds_exit(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c new file mode 100644 index 00000000000..f980e574e1f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c @@ -0,0 +1,1601 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/mac80211.h> + +#include <asm/div64.h> + +#include "iwl-eeprom.h" +#include "iwl-wifi.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-agn-calib.h" +#include "iwl-agn.h" +#include "iwl-shared.h" +#include "iwl-bus.h" +#include "iwl-trans.h" + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_AP), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, +}; + +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_dualmode[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_sta_ap_limits, + .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), + }, +}; + +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_p2p[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_p2p_sta_go_limits, + .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_p2p_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), + }, +}; + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. + */ +int iwlagn_mac_setup_register(struct iwl_priv *priv, + struct iwlagn_ucode_capabilities *capa) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + struct iwl_rxon_context *ctx; + + hw->rate_control_algorithm = "iwl-agn-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; + + /* + * Including the following line will crash some AP's. This + * workaround removes the stimulus which causes the crash until + * the AP software can be fixed. 
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + */ + + hw->flags |= IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + + if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) + hw->flags |= IEEE80211_HW_MFP_CAPABLE; + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + for_each_context(priv, ctx) { + hw->wiphy->interface_modes |= ctx->interface_modes; + hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; + } + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { + hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_p2p); + } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { + hw->wiphy->iface_combinations = + iwlagn_iface_combinations_dualmode; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_dualmode); + } + + hw->wiphy->max_remain_on_channel_duration = 1000; + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; + + if (trans(priv)->ucode_wowlan.code.len && + device_can_wakeup(bus(priv)->dev)) { + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; + if (!iwlagn_mod_params.sw_crypto) + hw->wiphy->wowlan.flags |= + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE; + + hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + hw->wiphy->wowlan.pattern_min_len = + IWLAGN_WOWLAN_MIN_PATTERN_LEN; + hw->wiphy->wowlan.pattern_max_len = + IWLAGN_WOWLAN_MAX_PATTERN_LEN; + } + + if (iwlagn_mod_params.power_save) + hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + hw->wiphy->hw_version = bus_get_hw_id(bus(priv)); + + iwl_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + +void iwlagn_mac_unregister(struct iwl_priv *priv) +{ + if (!priv->mac80211_registered) + return; + iwl_leds_exit(priv); + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; +} + +static int __iwl_up(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret; + + lockdep_assert_held(&priv->shrd->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + for_each_context(priv, ctx) { + ret = iwlagn_alloc_bcast_station(priv, ctx); + if (ret) { + iwl_dealloc_bcast_stations(priv); + return ret; + } + } + + ret = iwl_run_init_ucode(trans(priv)); + if (ret) { + 
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); + goto error; + } + + ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + ret = iwl_alive_start(priv); + if (ret) + goto error; + return 0; + + error: + set_bit(STATUS_EXIT_PENDING, &priv->shrd->status); + __iwl_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status); + + IWL_ERR(priv, "Unable to initialize device.\n"); + return ret; +} + +static int iwlagn_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->shrd->mutex); + ret = __iwl_up(priv); + mutex_unlock(&priv->shrd->mutex); + if (ret) + return ret; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Now we should be done, and the READY bit should be set. */ + if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status))) + ret = -EIO; + + iwlagn_led_enable(priv); + + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static void iwlagn_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + iwl_down(priv); + + flush_workqueue(priv->shrd->workqueue); + + /* User space software may expect getting rfkill changes + * even if interface is down */ + iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF); + iwl_enable_rfkill_int(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_priv *priv = hw->priv; + + if (iwlagn_mod_params.sw_crypto) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) + goto out; + + memcpy(priv->kek, data->kek, NL80211_KEK_LEN); + memcpy(priv->kck, data->kck, NL80211_KCK_LEN); + priv->replay_ctr = + cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + priv->have_rekey_data = true; + + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +#ifdef CONFIG_PM_SLEEP + +static int iwlagn_mac_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int ret; + + if (WARN_ON(!wowlan)) + return -EINVAL; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + /* Don't attempt WoWLAN when not associated, tear down instead. 
*/ + if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || + !iwl_is_associated_ctx(ctx)) { + ret = 1; + goto out; + } + + ret = iwlagn_suspend(priv, hw, wowlan); + if (ret) + goto error; + + device_set_wakeup_enable(bus(priv)->dev, true); + + /* Now let the ucode operate on its own */ + iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + goto out; + + error: + priv->shrd->wowlan = false; + iwlagn_prepare_restart(priv); + ieee80211_restart_hw(priv->hw); + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwlagn_mac_resume(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif; + unsigned long flags; + u32 base, status = 0xffffffff; + int ret = -EIO; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + base = priv->shrd->device_pointers.error_event_table; + if (iwlagn_hw_valid_rtc_data_addr(base)) { + spin_lock_irqsave(&bus(priv)->reg_lock, flags); + ret = iwl_grab_nic_access_silent(bus(priv)); + if (ret == 0) { + iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base); + status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); + iwl_release_nic_access(bus(priv)); + } + spin_unlock_irqrestore(&bus(priv)->reg_lock, flags); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (ret == 0) { + struct iwl_trans *trans = trans(priv); + if (!priv->wowlan_sram) + priv->wowlan_sram = + kzalloc(trans->ucode_wowlan.data.len, + GFP_KERNEL); + + if (priv->wowlan_sram) + _iwl_read_targ_mem_words( + bus(priv), 0x800000, priv->wowlan_sram, + trans->ucode_wowlan.data.len / 4); + } +#endif + } + + /* we'll clear ctx->vif during iwlagn_prepare_restart() */ + vif = ctx->vif; + + priv->shrd->wowlan = false; + + device_set_wakeup_enable(bus(priv)->dev, false); + + iwlagn_prepare_restart(priv); + + memset((void *)&ctx->active, 0, sizeof(ctx->active)); + iwl_connection_init_rx_config(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); + + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + ieee80211_resume_disconnect(vif); + + return 1; +} + +#endif + +static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwlagn_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); +} + +static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_priv *priv = hw->priv; + + iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); +} + +static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwlagn_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /* fall through */ 
+ case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + break; + } + + /* + * We could program these keys into the hardware as well, but we + * don't expect much multicast traffic in IBSS and having keys + * for more stations is probably more useful. + * + * Mark key TX-only and return 0. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + key->hw_key_idx = WEP_INVALID_OFFSET; + return 0; + } + + /* If they key was TX-only, accept deletion */ + if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) + return 0; + + mutex_lock(&priv->shrd->mutex); + iwl_scan_cancel_timeout(priv, 100); + + BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. + */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; + } + + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) { + ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); + break; + } + ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); + if (ret) { + /* + * can't add key for RX, but we don't need it + * in the device for TX so still return 0 + */ + ret = 0; + key->hw_key_idx = WEP_INVALID_OFFSET; + } + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl_remove_default_wep_key(priv, ctx, key); + else + ret = iwl_remove_dynamic_key(priv, ctx, key, sta); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct iwl_priv *priv = hw->priv; + int ret = -EINVAL; + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)) + return -EACCES; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + break; + IWL_DEBUG_HT(priv, "start Rx\n"); + ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl_sta_rx_agg_stop(priv, sta, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_START: + if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + break; + IWL_DEBUG_HT(priv, "start Tx\n"); + ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); + if ((ret == 0) && (priv->agg_tids_count > 0)) { + priv->agg_tids_count--; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); + } + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) + ret = 0; + if 
(!priv->agg_tids_count && cfg(priv)->ht_params && + cfg(priv)->ht_params->use_rts_for_aggregation) { + /* + * switch off RTS/CTS if it was previously enabled + */ + sta_priv->lq_sta.lq.general_params.flags &= + ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), + &sta_priv->lq_sta.lq, CMD_ASYNC, false); + } + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size); + break; + } + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + return ret; +} + +static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret = 0; + u8 sta_id; + + IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->shrd->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->sta_id = IWL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + if (vif->type == NL80211_IFTYPE_AP) + sta_priv->client = true; + + ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, + is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? */ + goto out; + } + + sta_priv->sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl_rs_rate_init(priv, sta, sta_id); + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + /* + * MULTI-FIXME + * When we add support for multiple interfaces, we need to + * revisit this. The channel switch command in the device + * only affects the BSS context, but what does that really + * mean? And what if we get a CSA on the second interface? + * This needs a lot of work. 
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u16 ch; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->shrd->mutex); + + if (iwl_is_rfkill(priv->shrd)) + goto out; + + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) || + test_bit(STATUS_SCANNING, &priv->shrd->status) || + test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status)) + goto out; + + if (!iwl_is_associated_ctx(ctx)) + goto out; + + if (!cfg(priv)->lib->set_channel_switch) + goto out; + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) == ch) + goto out; + + ch_info = iwl_get_channel_info(priv, channel->band, ch); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + + spin_lock_irq(&priv->shrd->lock); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) + iwlagn_config_ht40(conf, ctx); + else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_set_rxon_channel(priv, channel, ctx); + iwl_set_rxon_ht(priv, ht_conf); + iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); + + spin_unlock_irq(&priv->shrd->lock); + + iwl_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); + priv->switch_channel = cpu_to_le16(ch); + if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) { + clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); + priv->switch_channel = 0; + ieee80211_chswitch_done(ctx->vif, false); + } + +out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->shrd->mutex); + + for_each_context(priv, ctx) { + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. + */ + } + + mutex_unlock(&priv->shrd->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. 
+ */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) +{ + struct iwl_priv *priv = hw->priv; + + mutex_lock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { + IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); + goto done; + } + if (iwl_is_rfkill(priv->shrd)) { + IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); + goto done; + } + + /* + * mac80211 will not push any more frames for transmit + * until the flush is completed + */ + if (drop) { + IWL_DEBUG_MAC80211(priv, "send flush command\n"); + if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; + } + } + IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); + iwl_trans_wait_tx_queue_empty(trans(priv)); +done: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type, + int duration) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; + int err = 0; + + if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + return -EOPNOTSUPP; + + if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) + return -EOPNOTSUPP; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) { + err = -EBUSY; + goto out; + } + + priv->hw_roc_channel = channel; + priv->hw_roc_chantype = channel_type; + /* convert from ms to TU */ + priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024); + priv->hw_roc_start_notified = false; + cancel_delayed_work(&priv->hw_roc_disable_work); + + if (!ctx->is_active) { + static const struct iwl_qos_info default_qos_data = { + .def_qos_parm = { + .ac[0] = { + .cw_min = cpu_to_le16(3), + .cw_max = cpu_to_le16(7), + .aifsn = 2, + .edca_txop = cpu_to_le16(1504), + }, + .ac[1] = { + .cw_min = cpu_to_le16(7), + .cw_max = cpu_to_le16(15), + .aifsn = 2, + .edca_txop = cpu_to_le16(3008), + }, + .ac[2] = { + .cw_min = cpu_to_le16(15), + .cw_max = cpu_to_le16(1023), + .aifsn = 3, + }, + .ac[3] = { + .cw_min = cpu_to_le16(15), + .cw_max = cpu_to_le16(1023), + .aifsn = 7, + }, + }, + }; + + ctx->is_active = true; + ctx->qos_data = default_qos_data; + ctx->staging.dev_type = RXON_DEV_TYPE_P2P; + memcpy(ctx->staging.node_addr, + priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, + ETH_ALEN); + memcpy(ctx->staging.bssid_addr, + priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, + ETH_ALEN); + err = iwlagn_commit_rxon(priv, ctx); + if (err) + goto out; + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK | + RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK; + + err = iwlagn_commit_rxon(priv, ctx); + if (err) { + iwlagn_disable_roc(priv); + goto out; + } + priv->hw_roc_setup = true; + } + + err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band); + if (err) + iwlagn_disable_roc(priv); + + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return err; +} + +static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + return -EOPNOTSUPP; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + 
iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); + iwlagn_disable_roc(priv); + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return 0; +} + +static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *bssid, + enum ieee80211_tx_sync_type type) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + u8 sta_id; + + if (ctx->ctxid != IWL_RXON_CTX_PAN) + return 0; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (iwl_is_associated_ctx(ctx)) { + ret = 0; + goto out; + } + + if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, + &priv->shrd->status)) { + ret = -EBUSY; + goto out; + } + + ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id); + if (ret) + goto out; + + if (WARN_ON(sta_id != ctx->ap_sta_id)) { + ret = -EIO; + goto out_remove_sta; + } + + memcpy(ctx->bssid, bssid, ETH_ALEN); + ctx->preauth_bssid = true; + + ret = iwlagn_commit_rxon(priv, ctx); + + if (ret == 0) + goto out; + + out_remove_sta: + iwl_remove_station(priv, sta_id, bssid); + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *bssid, + enum ieee80211_tx_sync_type type) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + + if (ctx->ctxid != IWL_RXON_CTX_PAN) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (iwl_is_associated_ctx(ctx)) + goto out; + + iwl_remove_station(priv, ctx->ap_sta_id, bssid); + ctx->preauth_bssid = false; + /* no need to commit */ + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, + enum ieee80211_rssi_event rssi_event) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) { + if (rssi_event == RSSI_EVENT_LOW) + priv->bt_enable_pspoll = true; + else if (rssi_event == RSSI_EVENT_HIGH) + priv->bt_enable_pspoll = false; + + iwlagn_send_advance_bt_config(priv); + } else { + IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," + "ignoring RSSI callback\n"); + } + + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct iwl_priv *priv = hw->priv; + + queue_work(priv->shrd->workqueue, &priv->beacon_update); + + return 0; +} + +static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + unsigned long flags; + int q; + + if (WARN_ON(!ctx)) + return -EINVAL; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_is_ready_rf(priv->shrd)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->shrd->lock, flags); + + 
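	/*
	 * Editor's note: mac80211 hands in queue 0 as the highest-priority
	 * queue, and the q = AC_NUM - 1 - queue computed above mirrors that
	 * index into the uCode AC array; params->txop arrives in units of
	 * 32 usec and is converted to microseconds for edca_txop below.
	 */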
ctx->qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + ctx->qos_data.def_qos_parm.ac[q].cw_max = + cpu_to_le16(params->cw_max); + ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + ctx->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; + + spin_unlock_irqrestore(&priv->shrd->lock, flags); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + return priv->ibss_manager == IWL_IBSS_MANAGER; +} + +static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + iwl_connection_init_rx_config(priv, ctx); + + iwlagn_set_rxon_chain(priv, ctx); + + return iwlagn_commit_rxon(priv, ctx); +} + +static int iwl_setup_interface(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err; + + lockdep_assert_held(&priv->shrd->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it is for hardware + * that supports only one context. + */ + priv->iw_mode = vif->type; + + ctx->is_active = true; + + err = iwl_set_mode(priv, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist && + vif->type == NL80211_IFTYPE_ADHOC) { + /* + * pretend to have high BT traffic as long as we + * are operating in IBSS mode, as this will cause + * the rate scaling etc. to behave as intended. + */ + priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; + } + + return 0; +} + +static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *tmp, *ctx = NULL; + int err; + enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); + + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + viftype, vif->addr); + + cancel_delayed_work_sync(&priv->hw_roc_disable_work); + + mutex_lock(&priv->shrd->mutex); + + iwlagn_disable_roc(priv); + + if (!iwl_is_ready_rf(priv->shrd)) { + IWL_WARN(priv, "Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + for_each_context(priv, tmp) { + u32 possible_modes = + tmp->interface_modes | tmp->exclusive_interface_modes; + + if (tmp->vif) { + /* check if this busy context is exclusive */ + if (tmp->exclusive_interface_modes & + BIT(tmp->vif->type)) { + err = -EINVAL; + goto out; + } + continue; + } + + if (!(possible_modes & BIT(viftype))) + continue; + + /* have maybe usable context w/o interface */ + ctx = tmp; + break; + } + + if (!ctx) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = ctx; + ctx->vif = vif; + + err = iwl_setup_interface(priv, ctx); + if (!err) + goto out; + + ctx->vif = NULL; + priv->iw_mode = NL80211_IFTYPE_STATION; + out: + mutex_unlock(&priv->shrd->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} + +static void iwl_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) +{ + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&priv->shrd->mutex); + + if (priv->scan_vif == vif) { + iwl_scan_cancel_timeout(priv, 200); + iwl_force_scan_end(priv); + } + + if (!mode_change) { + iwl_set_mode(priv, ctx); + if (!ctx->always_active) + ctx->is_active = false; + } + + /* + * When 
removing the IBSS interface, overwrite the + * BT traffic load with the stored one from the last + * notification, if any. If this is a device that + * doesn't implement this, this has no effect since + * both values are the same and zero. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) + priv->bt_traffic_load = priv->last_bt_traffic_load; +} + +static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->shrd->mutex); + + if (WARN_ON(ctx->vif != vif)) { + struct iwl_rxon_context *tmp; + IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); + for_each_context(priv, tmp) + IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", + tmp->ctxid, tmp, tmp->vif); + } + ctx->vif = NULL; + + iwl_teardown_interface(priv, vif, false); + + mutex_unlock(&priv->shrd->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} + +static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_rxon_context *tmp; + enum nl80211_iftype newviftype = newtype; + u32 interface_modes; + int err; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&priv->shrd->mutex); + + if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! + */ + err = -EBUSY; + goto out; + } + + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; + + if (!(interface_modes & BIT(newtype))) { + err = -EBUSY; + goto out; + } + + /* + * Refuse a change that should be done by moving from the PAN + * context to the BSS context instead, if the BSS context is + * available and can support the new interface type. + */ + if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif && + (bss_ctx->interface_modes & BIT(newtype) || + bss_ctx->exclusive_interface_modes & BIT(newtype))) { + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + err = -EBUSY; + goto out; + } + + if (ctx->exclusive_interface_modes & BIT(newtype)) { + for_each_context(priv, tmp) { + if (ctx == tmp) + continue; + + if (!tmp->vif) + continue; + + /* + * The current mode switch would be exclusive, but + * another context is active ... refuse the switch. + */ + err = -EBUSY; + goto out; + } + } + + /* success */ + iwl_teardown_interface(priv, vif, true); + vif->type = newviftype; + vif->p2p = newp2p; + err = iwl_setup_interface(priv, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. + */ + err = 0; + + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return err; +} + +static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (req->n_channels == 0) + return -EINVAL; + + mutex_lock(&priv->shrd->mutex); + + /* + * If an internal scan is in progress, just set + * up the scan_request as per above. 
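 * (Editor's note: the saved request is serviced once the internal scan
 * completes, so it is deferred here rather than rejected.)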
+ */ + if (priv->scan_type != IWL_SCAN_NORMAL) { + IWL_DEBUG_SCAN(priv, + "SCAN request during internal scan - defer\n"); + priv->scan_request = req; + priv->scan_vif = vif; + ret = 0; + } else { + priv->scan_request = req; + priv->scan_vif = vif; + /* + * mac80211 will only ask for one band at a time + * so using channels[0] here is ok + */ + ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, + req->channels[0]->band); + if (ret) { + priv->scan_request = NULL; + priv->scan_vif = NULL; + } + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); + + mutex_unlock(&priv->shrd->mutex); + + return ret; +} + +static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter: received request to remove " + "station %pM\n", sta->addr); + mutex_lock(&priv->shrd->mutex); + IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", + sta->addr); + ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); + if (ret) + IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n", + sta->addr); + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->shrd->sta_lock, flags); + priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = 0; + priv->stations[sta_id].sta.sleep_tx_count = 0; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + +} + +static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int sta_id; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + switch (cmd) { + case STA_NOTIFY_SLEEP: + WARN_ON(!sta_priv->client); + sta_priv->asleep = true; + if (atomic_read(&sta_priv->pending_frames) > 0) + ieee80211_sta_block_awake(hw, sta, true); + break; + case STA_NOTIFY_AWAKE: + WARN_ON(!sta_priv->client); + if (!sta_priv->asleep) + break; + sta_priv->asleep = false; + sta_id = iwl_sta_id(sta); + if (sta_id != IWL_INVALID_STATION) + iwl_sta_modify_ps_wake(priv, sta_id); + break; + default: + break; + } + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +struct ieee80211_ops iwlagn_hw_ops = { + .tx = iwlagn_mac_tx, + .start = iwlagn_mac_start, + .stop = iwlagn_mac_stop, +#ifdef CONFIG_PM_SLEEP + .suspend = iwlagn_mac_suspend, + .resume = iwlagn_mac_resume, +#endif + .add_interface = iwlagn_mac_add_interface, + .remove_interface = iwlagn_mac_remove_interface, + .change_interface = iwlagn_mac_change_interface, + .config = iwlagn_mac_config, + .configure_filter = iwlagn_configure_filter, + .set_key = iwlagn_mac_set_key, + .update_tkip_key = iwlagn_mac_update_tkip_key, + .set_rekey_data = iwlagn_mac_set_rekey_data, + .conf_tx = iwlagn_mac_conf_tx, + .bss_info_changed = iwlagn_bss_info_changed, + .ampdu_action = iwlagn_mac_ampdu_action, + .hw_scan = iwlagn_mac_hw_scan, + .sta_notify = iwlagn_mac_sta_notify, + .sta_add = iwlagn_mac_sta_add, + .sta_remove = iwlagn_mac_sta_remove, + .channel_switch = iwlagn_mac_channel_switch, + .flush 
= iwlagn_mac_flush, + .tx_last_beacon = iwlagn_mac_tx_last_beacon, + .remain_on_channel = iwlagn_mac_remain_on_channel, + .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, + .rssi_callback = iwlagn_mac_rssi_callback, + CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd) + CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump) + .tx_sync = iwlagn_mac_tx_sync, + .finish_tx_sync = iwlagn_mac_finish_tx_sync, + .set_tim = iwlagn_mac_set_tim, +}; + +/* This function both allocates and initializes hw and priv. */ +struct ieee80211_hw *iwl_alloc_all(void) +{ + struct iwl_priv *priv; + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops); + if (!hw) + goto out; + + priv = hw->priv; + priv->hw = hw; + +out: + return hw; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index 1800029911a..fb30ea7ca96 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -135,7 +135,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus) } } -static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], +static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[], int buf_len) { struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); @@ -144,6 +144,13 @@ static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], pci_dev->subsystem_device); } +static u32 iwl_pci_get_hw_id(struct iwl_bus *bus) +{ + struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); + + return (pci_dev->device << 16) + pci_dev->subsystem_device; +} + static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val) { iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs); @@ -163,6 +170,7 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs) static const struct iwl_bus_ops bus_ops_pci = { .get_pm_support = iwl_pci_is_pm_supported, .apm_config = iwl_pci_apm_config, + .get_hw_id_string = iwl_pci_get_hw_id_string, .get_hw_id = iwl_pci_get_hw_id, .write8 = iwl_pci_write8, .write32 = iwl_pci_write32, @@ -256,6 +264,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */ + {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */ /* 6x30 Series */ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, @@ -325,46 +335,28 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, /* 2x30 Series */ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)}, /* 6x35 Series */ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, - 
{IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, /* 105 Series */ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, /* 135 Series */ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)}, {0} }; diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 4eaab204322..2b188a6025b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -167,7 +167,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, u8 skip; u32 slp_itrvl; - if (priv->cfg->adv_pm) { + if (cfg(priv)->adv_pm) { table = apm_range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = apm_range_1; @@ -221,7 +221,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!priv->cfg->bt_params->bt_sco_disable) + if (!cfg(priv)->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -307,7 +307,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv, cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!priv->cfg->bt_params->bt_sco_disable) + if (!cfg(priv)->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -350,7 +350,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->shrd->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (!priv->cfg->base_params->no_idle_support && + else if (!cfg(priv)->base_params->no_idle_support && priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index e5d727f537d..084aa2c4ccf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -416,6 +416,8 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) if (!iwl_is_associated_ctx(ctx)) continue; + if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P) + continue; value = ctx->beacon_int; if (!value) value = IWL_PASSIVE_DWELL_BASE; @@ -678,7 +680,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) priv->contexts[IWL_RXON_CTX_BSS].active.flags & RXON_FLG_CHANNEL_MODE_MSK) >> RXON_FLG_CHANNEL_MODE_POS; - if (chan_mod == CHANNEL_MODE_PURE_40) { + if ((priv->scan_request && priv->scan_request->no_cck) || + chan_mod == CHANNEL_MODE_PURE_40) { rate = IWL_RATE_6M_PLCP; } else { rate = IWL_RATE_1M_PLCP; @@ -688,8 +691,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) * Internal scans are 
passive, so we can indiscriminately set * the BT ignore flag on 2.4 GHz since it applies to TX only. */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; case IEEE80211_BAND_5GHZ: @@ -730,12 +733,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; - if (priv->cfg->scan_rx_antennas[band]) - rx_ant = priv->cfg->scan_rx_antennas[band]; + if (cfg(priv)->scan_rx_antennas[band]) + rx_ant = cfg(priv)->scan_rx_antennas[band]; if (band == IEEE80211_BAND_2GHZ && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ scan_tx_antennas = first_antenna(scan_tx_antennas); } @@ -759,8 +762,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rx_ant = first_antenna(active_chains); } - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (cfg(priv)->bt_params && + cfg(priv)->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ rx_ant = first_antenna(rx_ant); @@ -938,51 +941,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, return 0; } -int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (req->n_channels == 0) - return -EINVAL; - - mutex_lock(&priv->shrd->mutex); - - /* - * If an internal scan is in progress, just set - * up the scan_request as per above. - */ - if (priv->scan_type != IWL_SCAN_NORMAL) { - IWL_DEBUG_SCAN(priv, - "SCAN request during internal scan - defer\n"); - priv->scan_request = req; - priv->scan_vif = vif; - ret = 0; - } else { - priv->scan_request = req; - priv->scan_vif = vif; - /* - * mac80211 will only ask for one band at a time - * so using channels[0] here is ok - */ - ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, - req->channels[0]->band); - if (ret) { - priv->scan_request = NULL; - priv->scan_vif = NULL; - } - } - - IWL_DEBUG_MAC80211(priv, "leave\n"); - - mutex_unlock(&priv->shrd->mutex); - - return ret; -} /* * internal short scan, this function should only been called while associated. 
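Editor's note: the iwl-scan.c hunk above also wires cfg80211's no_cck scan flag into the 2.4 GHz probe-rate choice. Condensed from the diff (not a verbatim copy of the driver code):

	if ((priv->scan_request && priv->scan_request->no_cck) ||
	    chan_mod == CHANNEL_MODE_PURE_40)
		rate = IWL_RATE_6M_PLCP;	/* OFDM-only probes */
	else
		rate = IWL_RATE_1M_PLCP;	/* CCK probes still allowed */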
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h index 14eaf37ce3b..dc55cc4a810 100644 --- a/drivers/net/wireless/iwlwifi/iwl-shared.h +++ b/drivers/net/wireless/iwlwifi/iwl-shared.h @@ -94,9 +94,9 @@ * This implementation is iwl-pci.c */ -struct iwl_cfg; struct iwl_bus; struct iwl_priv; +struct iwl_trans; struct iwl_sensitivity_ranges; struct iwl_trans_ops; @@ -107,6 +107,10 @@ struct iwl_trans_ops; extern struct iwl_mod_params iwlagn_mod_params; +#define IWL_DISABLE_HT_ALL BIT(0) +#define IWL_DISABLE_HT_TXAGG BIT(1) +#define IWL_DISABLE_HT_RXAGG BIT(2) + /** * struct iwl_mod_params * @@ -114,7 +118,8 @@ extern struct iwl_mod_params iwlagn_mod_params; * * @sw_crypto: using hardware encryption, default = 0 * @num_of_queues: number of tx queue, HW dependent - * @disable_11n: 11n capabilities enabled, default = 0 + * @disable_11n: disable 11n capabilities, default = 0, + * use IWL_DISABLE_HT_* constants * @amsdu_size_8K: enable 8K amsdu size, default = 1 * @antenna: both antennas (use diversity), default = 0 * @restart_fw: restart firmware, default = 1 @@ -135,7 +140,7 @@ extern struct iwl_mod_params iwlagn_mod_params; struct iwl_mod_params { int sw_crypto; int num_of_queues; - int disable_11n; + unsigned int disable_11n; int amsdu_size_8K; int antenna; int restart_fw; @@ -174,8 +179,6 @@ struct iwl_mod_params { * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit * relevant for 1000, 6000 and up * @wd_timeout: TX queues watchdog timeout - * @calib_init_cfg: setup initial calibrations for the hw - * @calib_rt_cfg: setup runtime calibrations for the hw * @struct iwl_sensitivity_ranges: range of sensitivity values */ struct iwl_hw_params { @@ -195,67 +198,148 @@ struct iwl_hw_params { u32 ct_kill_exit_threshold; unsigned int wd_timeout; - u32 calib_init_cfg; - u32 calib_rt_cfg; const struct iwl_sensitivity_ranges *sens; }; /** - * enum iwl_agg_state + * enum iwl_ucode_type * - * The state machine of the BA agreement establishment / tear down. - * These states relate to a specific RA / TID. + * The type of ucode currently loaded on the hardware. * - * @IWL_AGG_OFF: aggregation is not used - * @IWL_AGG_ON: aggregation session is up - * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. + * @IWL_UCODE_NONE: No ucode loaded + * @IWL_UCODE_REGULAR: Normal runtime ucode + * @IWL_UCODE_INIT: Initial ucode + * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode */ -enum iwl_agg_state { - IWL_AGG_OFF = 0, - IWL_AGG_ON, - IWL_EMPTYING_HW_QUEUE_ADDBA, - IWL_EMPTYING_HW_QUEUE_DELBA, +enum iwl_ucode_type { + IWL_UCODE_NONE, + IWL_UCODE_REGULAR, + IWL_UCODE_INIT, + IWL_UCODE_WOWLAN, }; /** - * struct iwl_ht_agg - aggregation state machine - - * This structs holds the states for the BA agreement establishment and tear - * down. It also holds the state during the BA session itself. This struct is - * duplicated for each RA / TID. - - * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the - * Tx response (REPLY_TX), and the block ack notification - * (REPLY_COMPRESSED_BA). - * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session - used by the transport layer. - * Needed by the upper layer for debugfs only. 
- * @wait_for_ba: Expect block-ack before next Tx reply + * struct iwl_notification_wait - notification wait entry + * @list: list head for global list + * @fn: function called with the notification + * @cmd: command ID + * + * This structure is not used directly, to wait for a + * notification declare it on the stack, and call + * iwlagn_init_notification_wait() with appropriate + * parameters. Then do whatever will cause the ucode + * to notify the driver, and to wait for that then + * call iwlagn_wait_notification(). + * + * Each notification is one-shot. If at some point we + * need to support multi-shot notifications (which + * can't be allocated on the stack) we need to modify + * the code for them. */ -struct iwl_ht_agg { - u32 rate_n_flags; - enum iwl_agg_state state; - u16 txq_id; - bool wait_for_ba; +struct iwl_notification_wait { + struct list_head list; + + void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt, + void *data); + void *fn_data; + + u8 cmd; + bool triggered, aborted; }; /** - * struct iwl_tid_data - one for each RA / TID + * enum iwl_pa_type - Power Amplifier type + * @IWL_PA_SYSTEM: based on uCode configuration + * @IWL_PA_INTERNAL: use Internal only + */ +enum iwl_pa_type { + IWL_PA_SYSTEM = 0, + IWL_PA_INTERNAL = 1, +}; - * This structs holds the states for each RA / TID. +/* + * LED mode + * IWL_LED_DEFAULT: use device default + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IWL_LED_BLINK: adjust led blink rate based on blink table + */ +enum iwl_led_mode { + IWL_LED_DEFAULT, + IWL_LED_RF_STATE, + IWL_LED_BLINK, +}; - * @seq_number: the next WiFi sequence number to use - * @tfds_in_queue: number of packets sent to the HW queues. - * Exported for debugfs only - * @agg: aggregation state machine +/** + * struct iwl_cfg + * @name: Offical name of the device + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre<api>.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_ok: oldest version of the uCode API that is OK to load + * without a warning, for use in transitions + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @valid_tx_ant: valid transmit antenna + * @valid_rx_ant: valid receive antenna + * @sku: sku information from EEPROM + * @eeprom_ver: EEPROM version + * @eeprom_calib_ver: EEPROM calibration version + * @lib: pointer to the lib ops + * @additional_nic_config: additional nic configuration + * @base_params: pointer to basic parameters + * @ht_params: point to ht patameters + * @bt_params: pointer to bt parameters + * @pa_type: used by 6000 series only to identify the type of Power Amplifier + * @need_temp_offset_calib: need to perform temperature offset calibration + * @no_xtal_calib: some devices do not need crystal calibration data, + * don't send it to those + * @scan_rx_antennas: available antenna for scan operation + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * @adv_pm: advance power management + * @rx_with_siso_diversity: 1x1 device with rx antenna diversity + * @internal_wimax_coex: internal wifi/wimax combo device + * @iq_invert: I/Q inversion + * @temp_offset_v2: support v2 of temperature offset calibration + * + * We enable the driver to be backward compatible wrt API version. 
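The wait-for-notification flow described above is easiest to read end to end; the sketch below mirrors the iwl_testmode_cfg_init_calib() hunk later in this diff (the wrapper name is invented, everything else appears in the patch):

static int iwl_run_init_calib(struct iwl_shared *shrd, struct iwl_trans *trans)
{
	struct iwl_notification_wait calib_wait;
	int ret;

	/* the one-shot waiter lives on the stack and is registered first */
	iwl_init_notification_wait(shrd, &calib_wait,
				   CALIBRATION_COMPLETE_NOTIFICATION,
				   NULL, NULL);

	/* kick the uCode so it eventually sends the notification */
	ret = iwl_init_alive_start(trans);
	if (ret) {
		/* nothing was triggered: drop the waiter ourselves */
		iwl_remove_notification(shrd, &calib_wait);
		return ret;
	}

	/* sleeps until the RX path marks the entry triggered, or 2 s pass */
	return iwl_wait_notification(shrd, &calib_wait, 2 * HZ);
}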
The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. */ -struct iwl_tid_data { - u16 seq_number; - u16 tfds_in_queue; - struct iwl_ht_agg agg; +struct iwl_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_ok; + const unsigned int ucode_api_min; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 sku; + u16 eeprom_ver; + u16 eeprom_calib_ver; + const struct iwl_lib_ops *lib; + void (*additional_nic_config)(struct iwl_priv *priv); + /* params not likely to change within a device family */ + struct iwl_base_params *base_params; + /* params likely to change within a device family */ + struct iwl_ht_params *ht_params; + struct iwl_bt_params *bt_params; + enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */ + const bool need_temp_offset_calib; /* if used set to true */ + const bool no_xtal_calib; + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + enum iwl_led_mode led_mode; + const bool adv_pm; + const bool rx_with_siso_diversity; + const bool internal_wimax_coex; + const bool iq_invert; + const bool temp_offset_v2; }; /** @@ -266,15 +350,25 @@ struct iwl_tid_data { * @ucode_owner: IWL_OWNERSHIP_* * @cmd_queue: command queue number * @status: STATUS_* + * @wowlan: are we running wowlan uCode * @valid_contexts: microcode/device supports multiple contexts * @bus: pointer to the bus layer data + * @cfg: see struct iwl_cfg * @priv: pointer to the upper layer data + * @trans: pointer to the transport layer data * @hw_params: see struct iwl_hw_params * @workqueue: the workqueue used by all the layers of the driver * @lock: protect general shared data * @sta_lock: protects the station table. * If lock and sta_lock are needed, lock must be acquired first. 
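A partly hypothetical sketch of how a device family would fill this structure (the values are illustrative only; with fw_name_pre "iwlwifi-6000-" and ucode_api_max 6 the loader first requests "iwlwifi-6000-6.ucode" and falls back towards ucode_api_min):

static struct iwl_cfg iwl_example_6000_cfg = {
	.name		= "Example Wireless-N 6000 Series",
	.fw_name_pre	= "iwlwifi-6000-",   /* -> iwlwifi-6000-<api>.ucode */
	.ucode_api_max	= 6,
	.ucode_api_ok	= 4,
	.ucode_api_min	= 4,
	/* the two pointers below are assumed to be defined by the family */
	.base_params	= &iwl6000_base_params,
	.ht_params	= &iwl6000_ht_params,
	.led_mode	= IWL_LED_BLINK,
};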
* @mutex: + * @wait_command_queue: the wait_queue for SYNC host command nad uCode load + * @eeprom: pointer to the eeprom/OTP image + * @ucode_type: indicator of loaded ucode image + * @notif_waits: things waiting for notification + * @notif_wait_lock: lock protecting notification + * @notif_waitq: head of notification wait queue + * @device_pointers: pointers to ucode event tables */ struct iwl_shared { #ifdef CONFIG_IWLWIFI_DEBUG @@ -290,6 +384,7 @@ struct iwl_shared { u8 valid_contexts; struct iwl_bus *bus; + struct iwl_cfg *cfg; struct iwl_priv *priv; struct iwl_trans *trans; struct iwl_hw_params hw_params; @@ -299,13 +394,29 @@ struct iwl_shared { spinlock_t sta_lock; struct mutex mutex; - struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; - wait_queue_head_t wait_command_queue; + + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + + /* ucode related variables */ + enum iwl_ucode_type ucode_type; + + /* notification wait support */ + struct list_head notif_waits; + spinlock_t notif_wait_lock; + wait_queue_head_t notif_waitq; + + struct { + u32 error_event_table; + u32 log_event_table; + } device_pointers; + }; /*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */ #define priv(_m) ((_m)->shrd->priv) +#define cfg(_m) ((_m)->shrd->cfg) #define bus(_m) ((_m)->shrd->bus) #define trans(_m) ((_m)->shrd->trans) #define hw_params(_m) ((_m)->shrd->hw_params) @@ -427,12 +538,6 @@ int __must_check iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_device_cmd *cmd); int iwlagn_hw_valid_rtc_data_addr(u32 addr); -void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv, - enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid); -void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv, - enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid); void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state); void iwl_nic_config(struct iwl_priv *priv); void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb); @@ -445,6 +550,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv); void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac); void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac); +/* notification wait support */ +void iwl_abort_notification_waits(struct iwl_shared *shrd); +void __acquires(wait_entry) +iwl_init_notification_wait(struct iwl_shared *shrd, + struct iwl_notification_wait *wait_entry, + u8 cmd, + void (*fn)(struct iwl_trans *trans, + struct iwl_rx_packet *pkt, + void *data), + void *fn_data); +int __must_check __releases(wait_entry) +iwl_wait_notification(struct iwl_shared *shrd, + struct iwl_notification_wait *wait_entry, + unsigned long timeout); +void __releases(wait_entry) +iwl_remove_notification(struct iwl_shared *shrd, + struct iwl_notification_wait *wait_entry); + #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_reset_traffic_log(struct iwl_priv *priv); #endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c index 5e50d88f302..4a5cddd2d56 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c +++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c @@ -70,6 +70,7 @@ #include <net/mac80211.h> #include <net/netlink.h> +#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-debug.h" @@ -77,6 +78,7 @@ #include "iwl-agn.h" #include "iwl-testmode.h" #include "iwl-trans.h" +#include "iwl-bus.h" /* The TLVs used in the gnl message policy between the kernel module and * user space application. 
iwl_testmode_gnl_msg_policy is to be carried @@ -106,6 +108,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, + + [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, }, + [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, + [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, }; /* @@ -177,6 +186,18 @@ void iwl_testmode_init(struct iwl_priv *priv) { priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; priv->testmode_trace.trace_enabled = false; + priv->testmode_sram.sram_readed = false; +} + +static void iwl_sram_cleanup(struct iwl_priv *priv) +{ + if (priv->testmode_sram.sram_readed) { + kfree(priv->testmode_sram.buff_addr); + priv->testmode_sram.buff_addr = NULL; + priv->testmode_sram.buff_size = 0; + priv->testmode_sram.num_chunks = 0; + priv->testmode_sram.sram_readed = false; + } } static void iwl_trace_cleanup(struct iwl_priv *priv) @@ -201,6 +222,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv) void iwl_testmode_cleanup(struct iwl_priv *priv) { iwl_trace_cleanup(priv); + iwl_sram_cleanup(priv); } /* @@ -276,7 +298,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs); switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { - case IWL_TM_CMD_APP2DEV_REG_READ32: + case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: val32 = iwl_read32(bus(priv), ofs); IWL_INFO(priv, "32bit value to read 0x%x\n", val32); @@ -291,7 +313,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); break; - case IWL_TM_CMD_APP2DEV_REG_WRITE32: + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: if (!tb[IWL_TM_ATTR_REG_VALUE32]) { IWL_DEBUG_INFO(priv, "Error finding value to write\n"); @@ -302,7 +324,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) iwl_write32(bus(priv), ofs, val32); } break; - case IWL_TM_CMD_APP2DEV_REG_WRITE8: + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: if (!tb[IWL_TM_ATTR_REG_VALUE8]) { IWL_DEBUG_INFO(priv, "Error finding value to write\n"); return -ENOMSG; @@ -312,6 +334,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) iwl_write8(bus(priv), ofs, val8); } break; + case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32: + val32 = iwl_read_prph(bus(priv), ofs); + IWL_INFO(priv, "32bit value to read 0x%x\n", val32); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_DEBUG_INFO(priv, "Error allocating memory\n"); + return -ENOMEM; + } + NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_DEBUG_INFO(priv, + "Error sending msg : %d\n", status); + break; + case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32: + if (!tb[IWL_TM_ATTR_REG_VALUE32]) { + IWL_DEBUG_INFO(priv, + "Error finding value to write\n"); + return -ENOMSG; + } else { + val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); + IWL_INFO(priv, "32bit value to write 0x%x\n", val32); + iwl_write_prph(bus(priv), ofs, val32); + } + break; default: IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n"); return -ENOSYS; @@ -330,24 +378,24 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) struct iwl_notification_wait calib_wait; int ret; - iwlagn_init_notification_wait(priv, &calib_wait, + iwl_init_notification_wait(priv->shrd, 
&calib_wait, CALIBRATION_COMPLETE_NOTIFICATION, NULL, NULL); - ret = iwlagn_init_alive_start(priv); + ret = iwl_init_alive_start(trans(priv)); if (ret) { IWL_DEBUG_INFO(priv, "Error configuring init calibration: %d\n", ret); goto cfg_init_calib_error; } - ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ); + ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ); if (ret) IWL_DEBUG_INFO(priv, "Error detecting" " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret); return ret; cfg_init_calib_error: - iwlagn_remove_notification(priv, &calib_wait); + iwl_remove_notification(priv->shrd, &calib_wait); return ret; } @@ -370,14 +418,16 @@ cfg_init_calib_error: static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = hw->priv; + struct iwl_trans *trans = trans(priv); struct sk_buff *skb; unsigned char *rsp_data_ptr = NULL; int status = 0, rsp_data_len = 0; + u32 devid; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: - rsp_data_ptr = (unsigned char *)priv->cfg->name; - rsp_data_len = strlen(priv->cfg->name); + rsp_data_ptr = (unsigned char *)cfg(priv)->name; + rsp_data_len = strlen(cfg(priv)->name); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, rsp_data_len + 20); if (!skb) { @@ -396,8 +446,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) break; case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: - status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, - IWL_UCODE_INIT); + status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT); if (status) IWL_DEBUG_INFO(priv, "Error loading init ucode: %d\n", status); @@ -405,13 +454,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: iwl_testmode_cfg_init_calib(priv); - iwl_trans_stop_device(trans(priv)); + iwl_trans_stop_device(trans); break; case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: - status = iwlagn_load_ucode_wait_alive(priv, - &priv->ucode_rt, - IWL_UCODE_REGULAR); + status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR); if (status) { IWL_DEBUG_INFO(priv, "Error loading runtime ucode: %d\n", status); @@ -423,10 +470,25 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) "Error starting the device: %d\n", status); break; + case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: + iwl_scan_cancel_timeout(priv, 200); + iwl_trans_stop_device(trans); + status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN); + if (status) { + IWL_DEBUG_INFO(priv, + "Error loading WOWLAN ucode: %d\n", status); + break; + } + status = iwl_alive_start(priv); + if (status) + IWL_DEBUG_INFO(priv, + "Error starting the device: %d\n", status); + break; + case IWL_TM_CMD_APP2DEV_GET_EEPROM: - if (priv->eeprom) { + if (priv->shrd->eeprom) { skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, - priv->cfg->base_params->eeprom_size + 20); + cfg(priv)->base_params->eeprom_size + 20); if (!skb) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); @@ -435,8 +497,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_EEPROM_RSP); NLA_PUT(skb, IWL_TM_ATTR_EEPROM, - priv->cfg->base_params->eeprom_size, - priv->eeprom); + cfg(priv)->base_params->eeprom_size, + priv->shrd->eeprom); status = cfg80211_testmode_reply(skb); if (status < 0) IWL_DEBUG_INFO(priv, @@ -455,6 +517,37 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) priv->tm_fixed_rate = 
nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); break; + case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: + IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_DEBUG_INFO(priv, "Error allocating memory\n"); + return -ENOMEM; + } + NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver); + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_DEBUG_INFO(priv, + "Error sending msg : %d\n", status); + break; + + case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: + devid = bus_get_hw_id(bus(priv)); + IWL_INFO(priv, "hw version: 0x%x\n", devid); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_DEBUG_INFO(priv, "Error allocating memory\n"); + return -ENOMEM; + } + NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid); + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_DEBUG_INFO(priv, + "Error sending msg : %d\n", status); + break; + default: IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n"); return -ENOSYS; @@ -535,7 +628,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) } priv->testmode_trace.num_chunks = DIV_ROUND_UP(priv->testmode_trace.buff_size, - TRACE_CHUNK_SIZE); + DUMP_CHUNK_SIZE); break; case IWL_TM_CMD_APP2DEV_END_TRACE: @@ -567,15 +660,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb, idx = cb->args[4]; if (idx >= priv->testmode_trace.num_chunks) return -ENOENT; - length = TRACE_CHUNK_SIZE; + length = DUMP_CHUNK_SIZE; if (((idx + 1) == priv->testmode_trace.num_chunks) && - (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE)) + (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE)) length = priv->testmode_trace.buff_size % - TRACE_CHUNK_SIZE; + DUMP_CHUNK_SIZE; NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, priv->testmode_trace.trace_addr + - (TRACE_CHUNK_SIZE * idx)); + (DUMP_CHUNK_SIZE * idx)); idx++; cb->args[4] = idx; return 0; @@ -621,6 +714,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) return 0; } +/* + * This function handles the user application commands for SRAM data dump + * + * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and + * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading + * + * Several error will be retured, -EBUSY if the SRAM data retrieved by + * previous command has not been delivered to userspace, or -ENOMSG if + * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE) + * are missing, or -ENOMEM if the buffer allocation fails. + * + * Otherwise 0 is replied indicating the success of the SRAM reading. 
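To make the chunking used by the SRAM read and dump handlers below concrete (assuming 4 KiB pages, so DUMP_CHUNK_SIZE = PAGE_SIZE - 1024 = 3072 bytes): a 10000-byte request is rounded down to a multiple of 4 (still 10000), read as 10000 / 4 = 2500 32-bit words, and split into DIV_ROUND_UP(10000, 3072) = 4 netlink chunks; the dump callback then delivers 3072, 3072, 3072 and finally 10000 % 3072 = 784 bytes before ending the dump with -ENOENT.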
+ * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct iwl_priv *priv = hw->priv; + u32 base, ofs, size, maxsize; + + if (priv->testmode_sram.sram_readed) + return -EBUSY; + + if (!tb[IWL_TM_ATTR_SRAM_ADDR]) { + IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n"); + return -ENOMSG; + } + ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]); + if (!tb[IWL_TM_ATTR_SRAM_SIZE]) { + IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n"); + return -ENOMSG; + } + size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]); + switch (priv->shrd->ucode_type) { + case IWL_UCODE_REGULAR: + maxsize = trans(priv)->ucode_rt.data.len; + break; + case IWL_UCODE_INIT: + maxsize = trans(priv)->ucode_init.data.len; + break; + case IWL_UCODE_WOWLAN: + maxsize = trans(priv)->ucode_wowlan.data.len; + break; + case IWL_UCODE_NONE: + IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n"); + return -ENOSYS; + default: + IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n"); + return -ENOSYS; + } + if ((ofs + size) > maxsize) { + IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n"); + return -EINVAL; + } + priv->testmode_sram.buff_size = (size / 4) * 4; + priv->testmode_sram.buff_addr = + kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL); + if (priv->testmode_sram.buff_addr == NULL) { + IWL_DEBUG_INFO(priv, "Error allocating memory\n"); + return -ENOMEM; + } + base = 0x800000; + _iwl_read_targ_mem_words(bus(priv), base + ofs, + priv->testmode_sram.buff_addr, + priv->testmode_sram.buff_size / 4); + priv->testmode_sram.num_chunks = + DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE); + priv->testmode_sram.sram_readed = true; + return 0; +} + +static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct iwl_priv *priv = hw->priv; + int idx, length; + + if (priv->testmode_sram.sram_readed) { + idx = cb->args[4]; + if (idx >= priv->testmode_sram.num_chunks) { + iwl_sram_cleanup(priv); + return -ENOENT; + } + length = DUMP_CHUNK_SIZE; + if (((idx + 1) == priv->testmode_sram.num_chunks) && + (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE)) + length = priv->testmode_sram.buff_size % + DUMP_CHUNK_SIZE; + + NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length, + priv->testmode_sram.buff_addr + + (DUMP_CHUNK_SIZE * idx)); + idx++; + cb->args[4] = idx; + return 0; + } else + return -EFAULT; + + nla_put_failure: + return -ENOBUFS; +} + /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then @@ -668,9 +865,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n"); result = iwl_testmode_ucode(hw, tb); break; - case IWL_TM_CMD_APP2DEV_REG_READ32: - case IWL_TM_CMD_APP2DEV_REG_WRITE32: - case IWL_TM_CMD_APP2DEV_REG_WRITE8: + case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: + case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32: + case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32: IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); result = iwl_testmode_reg(hw, tb); break; @@ -680,6 +879,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: case IWL_TM_CMD_APP2DEV_GET_EEPROM: case 
IWL_TM_CMD_APP2DEV_FIXRATE_REQ: + case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: + case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: + case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); result = iwl_testmode_driver(hw, tb); break; @@ -696,6 +898,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) result = iwl_testmode_ownership(hw, tb); break; + case IWL_TM_CMD_APP2DEV_READ_SRAM: + IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n"); + result = iwl_testmode_sram(hw, tb); + break; + default: IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); result = -ENOSYS; @@ -744,6 +951,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); result = iwl_testmode_trace_dump(hw, tb, skb, cb); break; + case IWL_TM_CMD_APP2DEV_DUMP_SRAM: + IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n"); + result = iwl_testmode_sram_dump(hw, tb, skb, cb); + break; default: result = -EINVAL; break; diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h index b980bda4b0f..26138f11034 100644 --- a/drivers/net/wireless/iwlwifi/iwl-testmode.h +++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h @@ -76,9 +76,9 @@ * the actual uCode host command ID is carried with * IWL_TM_ATTR_UCODE_CMD_ID * - * @IWL_TM_CMD_APP2DEV_REG_READ32: - * @IWL_TM_CMD_APP2DEV_REG_WRITE32: - * @IWL_TM_CMD_APP2DEV_REG_WRITE8: + * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: + * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: + * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: * commands from user applicaiton to access register * * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name @@ -103,16 +103,30 @@ * @IWL_TM_CMD_DEV2APP_EEPROM_RSP: * commands from kernel space to carry the eeprom response * to user application + * * @IWL_TM_CMD_APP2DEV_OWNERSHIP: * commands from user application to own change the ownership of the uCode * if application has the ownership, the only host command from * testmode will deliver to uCode. 
Default owner is driver + * + * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32: + * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32: + * commands from user applicaiton to indirectly access peripheral register + * + * @IWL_TM_CMD_APP2DEV_READ_SRAM: + * @IWL_TM_CMD_APP2DEV_DUMP_SRAM: + * commands from user applicaiton to read data in sram + * + * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Weak On Wireless LAN uCode image + * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version + * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device + * */ enum iwl_tm_cmd_t { IWL_TM_CMD_APP2DEV_UCODE = 1, - IWL_TM_CMD_APP2DEV_REG_READ32 = 2, - IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3, - IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4, + IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2, + IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3, + IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4, IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5, IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6, IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7, @@ -126,7 +140,14 @@ enum iwl_tm_cmd_t { IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15, IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16, IWL_TM_CMD_APP2DEV_OWNERSHIP = 17, - IWL_TM_CMD_MAX = 18, + IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18, + IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19, + IWL_TM_CMD_APP2DEV_READ_SRAM = 20, + IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21, + IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22, + IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23, + IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24, + IWL_TM_CMD_MAX = 25, }; /* @@ -196,6 +217,26 @@ enum iwl_tm_cmd_t { * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP, * The mandatory fields are: * IWL_TM_ATTR_UCODE_OWNER for the new owner + * + * @IWL_TM_ATTR_SRAM_ADDR: + * @IWL_TM_ATTR_SRAM_SIZE: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM, + * The mandatory fields are: + * IWL_TM_ATTR_SRAM_ADDR for the address in sram + * IWL_TM_ATTR_SRAM_SIZE for the buffer size of data reading + * + * @IWL_TM_ATTR_SRAM_DUMP: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM, + * IWL_TM_ATTR_SRAM_DUMP for the data in sram + * + * @IWL_TM_ATTR_FW_VERSION: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION, + * IWL_TM_ATTR_FW_VERSION for the uCode version + * + * @IWL_TM_ATTR_DEVICE_ID: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID, + * IWL_TM_ATTR_DEVICE_ID for the device ID information + * */ enum iwl_tm_attr_t { IWL_TM_ATTR_NOT_APPLICABLE = 0, @@ -213,7 +254,12 @@ enum iwl_tm_attr_t { IWL_TM_ATTR_TRACE_DUMP = 12, IWL_TM_ATTR_FIXRATE = 13, IWL_TM_ATTR_UCODE_OWNER = 14, - IWL_TM_ATTR_MAX = 15, + IWL_TM_ATTR_SRAM_ADDR = 15, + IWL_TM_ATTR_SRAM_SIZE = 16, + IWL_TM_ATTR_SRAM_DUMP = 17, + IWL_TM_ATTR_FW_VERSION = 18, + IWL_TM_ATTR_DEVICE_ID = 19, + IWL_TM_ATTR_MAX = 20, }; /* uCode trace buffer */ @@ -221,6 +267,8 @@ enum iwl_tm_attr_t { #define TRACE_BUFF_SIZE_MIN 0x20000 #define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN #define TRACE_BUFF_PADD 0x2000 -#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024) + +/* Maximum data size of each dump it packet */ +#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024) #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h index 2b6756e8b8f..f6debf91d7b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h @@ -219,9 +219,7 @@ struct iwl_trans_pcie { /* INT ICT Table */ __le32 *ict_tbl; - void *ict_tbl_vir; dma_addr_t ict_tbl_dma; - dma_addr_t aligned_ict_tbl_dma; int ict_index; u32 inta; bool use_ict; @@ -236,6 +234,7 @@ struct 
iwl_trans_pcie { const u8 *ac_to_fifo[NUM_IWL_RXON_CTX]; const u8 *ac_to_queue[NUM_IWL_RXON_CTX]; u8 mcast_queue[NUM_IWL_RXON_CTX]; + u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; struct iwl_tx_queue *txq; unsigned long txq_ctx_active_msk; @@ -280,20 +279,16 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_tx_queue *txq, u16 byte_cnt); -void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id); int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid); + int sta_id, int tid); void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, struct iwl_tx_queue *txq, int tx_fifo_id, int scd_retry); -int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid, u16 *ssn); +int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid); void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, - int sta_id, int tid, int frame_limit); + int sta_id, int tid, int frame_limit, u16 ssn); void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, int index, enum dma_data_direction dma_dir); int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, @@ -354,8 +349,13 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) txq->swq_id = (hwq << 2) | ac; } +static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq) +{ + return txq->swq_id & 0x3; +} + static inline void iwl_wake_queue(struct iwl_trans *trans, - struct iwl_tx_queue *txq) + struct iwl_tx_queue *txq, const char *msg) { u8 queue = txq->swq_id; u8 ac = queue & 3; @@ -363,13 +363,22 @@ static inline void iwl_wake_queue(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) - if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) + if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) { + if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) { iwl_wake_sw_queue(priv(trans), ac); + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s", + hwq, ac, msg); + } else { + IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d" + " stop count %d. %s", + hwq, ac, atomic_read(&trans_pcie-> + queue_stop_count[ac]), msg); + } + } } static inline void iwl_stop_queue(struct iwl_trans *trans, - struct iwl_tx_queue *txq) + struct iwl_tx_queue *txq, const char *msg) { u8 queue = txq->swq_id; u8 ac = queue & 3; @@ -377,9 +386,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) - if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) + if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) { + if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) { iwl_stop_sw_queue(priv(trans), ac); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d" + " stop count %d. %s", + hwq, ac, atomic_read(&trans_pcie-> + queue_stop_count[ac]), msg); + } else { + IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d" + " stop count %d. 
%s", + hwq, ac, atomic_read(&trans_pcie-> + queue_stop_count[ac]), msg); + } + } else { + IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s", + hwq, msg); + } } #ifdef ieee80211_stop_queue diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c index 374c68cc1d7..752493f0040 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c @@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - base = priv->device_pointers.error_event_table; - if (priv->ucode_type == IWL_UCODE_INIT) { + base = trans->shrd->device_pointers.error_event_table; + if (trans->shrd->ucode_type == IWL_UCODE_INIT) { if (!base) base = priv->init_errlog_ptr; } else { @@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans) IWL_ERR(trans, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (priv->ucode_type == IWL_UCODE_INIT) + (trans->shrd->ucode_type == IWL_UCODE_INIT) ? "Init" : "RT"); return; } @@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans) IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver); IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver); IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd); + + IWL_ERR(trans, "0x%08X | isr0\n", table.isr0); + IWL_ERR(trans, "0x%08X | isr1\n", table.isr1); + IWL_ERR(trans, "0x%08X | isr2\n", table.isr2); + IWL_ERR(trans, "0x%08X | isr3\n", table.isr3); + IWL_ERR(trans, "0x%08X | isr4\n", table.isr4); + IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler); } /** @@ -657,7 +672,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) { struct iwl_priv *priv = priv(trans); /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ - if (priv->cfg->internal_wimax_coex && + if (cfg(priv)->internal_wimax_coex && (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) & @@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx, if (num_events == 0) return pos; - base = priv->device_pointers.log_event_table; - if (priv->ucode_type == IWL_UCODE_INIT) { + base = trans->shrd->device_pointers.log_event_table; + if (trans->shrd->ucode_type == IWL_UCODE_INIT) { if (!base) base = priv->init_evtlog_ptr; } else { @@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log, size_t bufsz = 0; struct iwl_priv *priv = priv(trans); - base = priv->device_pointers.log_event_table; - if (priv->ucode_type == IWL_UCODE_INIT) { + base = trans->shrd->device_pointers.log_event_table; + if (trans->shrd->ucode_type == IWL_UCODE_INIT) { logsize = priv->init_evtlog_size; if (!base) base = priv->init_evtlog_ptr; @@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log, IWL_ERR(trans, "Invalid event log pointer 0x%08X for %s uCode\n", base, - (priv->ucode_type == 
IWL_UCODE_INIT) + (trans->shrd->ucode_type == IWL_UCODE_INIT) ? "Init" : "RT"); return -EINVAL; } @@ -1108,7 +1123,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) isr_stats->tx++; handled |= CSR_INT_BIT_FH_TX; /* Wake up uCode load routine, now that load is complete */ - priv(trans)->ucode_write_complete = 1; + trans->ucode_write_complete = 1; wake_up(&trans->shrd->wait_command_queue); } @@ -1136,7 +1151,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans) * ICT functions * ******************************************************************************/ -#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) + +/* a device (PCI-E) page is 4096 bytes long */ +#define ICT_SHIFT 12 +#define ICT_SIZE (1 << ICT_SHIFT) +#define ICT_COUNT (ICT_SIZE / sizeof(u32)) /* Free dram table */ void iwl_free_isr_ict(struct iwl_trans *trans) @@ -1144,21 +1163,19 @@ void iwl_free_isr_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans_pcie->ict_tbl_vir) { - dma_free_coherent(bus(trans)->dev, - (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, - trans_pcie->ict_tbl_vir, + if (trans_pcie->ict_tbl) { + dma_free_coherent(bus(trans)->dev, ICT_SIZE, + trans_pcie->ict_tbl, trans_pcie->ict_tbl_dma); - trans_pcie->ict_tbl_vir = NULL; - memset(&trans_pcie->ict_tbl_dma, 0, - sizeof(trans_pcie->ict_tbl_dma)); - memset(&trans_pcie->aligned_ict_tbl_dma, 0, - sizeof(trans_pcie->aligned_ict_tbl_dma)); + trans_pcie->ict_tbl = NULL; + trans_pcie->ict_tbl_dma = 0; } } -/* allocate dram shared table it is a PAGE_SIZE aligned +/* + * allocate dram shared table, it is an aligned memory + * block of ICT_SIZE. * also reset all data related to ICT table interrupt. */ int iwl_alloc_isr_ict(struct iwl_trans *trans) @@ -1166,36 +1183,26 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* allocate shrared data table */ - trans_pcie->ict_tbl_vir = - dma_alloc_coherent(bus(trans)->dev, - (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, - &trans_pcie->ict_tbl_dma, GFP_KERNEL); - if (!trans_pcie->ict_tbl_vir) + trans_pcie->ict_tbl = + dma_alloc_coherent(bus(trans)->dev, ICT_SIZE, + &trans_pcie->ict_tbl_dma, + GFP_KERNEL); + if (!trans_pcie->ict_tbl) return -ENOMEM; - /* align table to PAGE_SIZE boundary */ - trans_pcie->aligned_ict_tbl_dma = - ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE); - - IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n", - (unsigned long long)trans_pcie->ict_tbl_dma, - (unsigned long long)trans_pcie->aligned_ict_tbl_dma, - (int)(trans_pcie->aligned_ict_tbl_dma - - trans_pcie->ict_tbl_dma)); + /* just an API sanity check ... 
it is guaranteed to be aligned */ + if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { + iwl_free_isr_ict(trans); + return -EINVAL; + } - trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir + - (trans_pcie->aligned_ict_tbl_dma - - trans_pcie->ict_tbl_dma); + IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n", + (unsigned long long)trans_pcie->ict_tbl_dma); - IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n", - trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir, - (int)(trans_pcie->aligned_ict_tbl_dma - - trans_pcie->ict_tbl_dma)); + IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl); /* reset table and index to all 0 */ - memset(trans_pcie->ict_tbl_vir, 0, - (sizeof(u32) * ICT_COUNT) + PAGE_SIZE); + memset(trans_pcie->ict_tbl, 0, ICT_SIZE); trans_pcie->ict_index = 0; /* add periodic RX interrupt */ @@ -1213,23 +1220,20 @@ int iwl_reset_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!trans_pcie->ict_tbl_vir) + if (!trans_pcie->ict_tbl) return 0; spin_lock_irqsave(&trans->shrd->lock, flags); iwl_disable_interrupts(trans); - memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT); + memset(trans_pcie->ict_tbl, 0, ICT_SIZE); - val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT; + val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; val |= CSR_DRAM_INT_TBL_ENABLE; val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; - IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X " - "aligned dma address %Lx\n", - val, - (unsigned long long)trans_pcie->aligned_ict_tbl_dma); + IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val); trans_pcie->use_ict = true; @@ -1266,6 +1270,8 @@ static irqreturn_t iwl_isr(int irq, void *data) if (!trans) return IRQ_NONE; + trace_iwlwifi_dev_irq(priv(trans)); + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_irqsave(&trans->shrd->lock, flags); @@ -1340,6 +1346,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) struct iwl_trans_pcie *trans_pcie; u32 inta, inta_mask; u32 val = 0; + u32 read; unsigned long flags; if (!trans) @@ -1353,6 +1360,8 @@ irqreturn_t iwl_isr_ict(int irq, void *data) if (!trans_pcie->use_ict) return iwl_isr(irq, data); + trace_iwlwifi_dev_irq(priv(trans)); + spin_lock_irqsave(&trans->shrd->lock, flags); /* Disable (but don't clear!) interrupts here to avoid @@ -1367,24 +1376,29 @@ irqreturn_t iwl_isr_ict(int irq, void *data) /* Ignore interrupt if there's nothing in NIC to service. * This may be due to IRQ shared with another device, * or due to sporadic interrupts thrown from our NIC. */ - if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) { + read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read); + if (!read) { IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); goto none; } - /* read all entries that not 0 start with ict_index */ - while (trans_pcie->ict_tbl[trans_pcie->ict_index]) { - - val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + /* + * Collect all entries up to the first 0, starting from ict_index; + * note we already read at ict_index. 
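Spelling out the arithmetic behind the rework above: ICT_SIZE = 1 << ICT_SHIFT = 4096 bytes, i.e. ICT_COUNT = 1024 32-bit entries; because dma_alloc_coherent() is treated by the in-tree comment as returning a buffer aligned to that size, the WARN_ON(ict_tbl_dma & (ICT_SIZE - 1)) sanity check holds, the low 12 bits of the DMA address are zero, and the value ict_tbl_dma >> ICT_SHIFT written to CSR_DRAM_INT_TBL_REG (together with the enable and wrap-check bits) loses no information.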
+ */ + do { + val |= read; IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", - trans_pcie->ict_index, - le32_to_cpu( - trans_pcie->ict_tbl[trans_pcie->ict_index])); + trans_pcie->ict_index, read); trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; trans_pcie->ict_index = iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); - } + read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, + read); + } while (read); /* We should not get this value, just ignore it. */ if (val == 0xffffffff) @@ -1411,7 +1425,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) if (likely(inta)) tasklet_schedule(&trans_pcie->irq_tasklet); else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && - !trans_pcie->inta) { + !trans_pcie->inta) { /* Allow interrupt if was disabled by this handler and * no tasklet was schedules, We should not enable interrupt, * tasklet will enable it. @@ -1427,7 +1441,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) * only Re-enable if disabled by irq. */ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && - !trans_pcie->inta) + !trans_pcie->inta) iwl_enable_interrupts(trans); spin_unlock_irqrestore(&trans->shrd->lock, flags); diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c index 4a0c95302a7..bd29568177e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c @@ -408,6 +408,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index) { + IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff); iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, (index & 0xff) | (txq_id << 8)); iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index); @@ -430,7 +431,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, txq->sched_retry = scd_retry; - IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n", + IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n", active ? "Activate" : "Deactivate", scd_retry ? 
"BA" : "AC/CMD", txq_id, tx_fifo_id); } @@ -446,14 +447,21 @@ static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie, return -EINVAL; } +static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id) +{ + if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE) + return false; + return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE + + hw_params(trans).num_ampdu_queues); +} + void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, int sta_id, - int tid, int frame_limit) + int tid, int frame_limit, u16 ssn) { - int tx_fifo, txq_id, ssn_idx; + int tx_fifo, txq_id; u16 ra_tid; unsigned long flags; - struct iwl_tid_data *tid_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -469,11 +477,15 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, return; } - spin_lock_irqsave(&trans->shrd->sta_lock, flags); - tid_data = &trans->shrd->tid_data[sta_id][tid]; - ssn_idx = SEQ_TO_SN(tid_data->seq_number); - txq_id = tid_data->agg.txq_id; - spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); + txq_id = trans_pcie->agg_txq[sta_id][tid]; + if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) { + IWL_ERR(trans, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWLAGN_FIRST_AMPDU_QUEUE, + IWLAGN_FIRST_AMPDU_QUEUE + + hw_params(trans).num_ampdu_queues - 1); + return; + } ra_tid = BUILD_RAxTID(sta_id, tid); @@ -493,9 +505,9 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, /* Place first TFD at index corresponding to start sequence number. * Assumes that ssn_idx is valid (!= 0xFFF) */ - trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx); + trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); + trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); + iwl_trans_set_wr_ptrs(trans, txq_id, ssn); /* Set up Tx window size and frame limit for this queue */ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr + @@ -539,12 +551,9 @@ static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans) } int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid, u16 *ssn) + int sta_id, int tid) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tid_data *tid_data; - unsigned long flags; int txq_id; txq_id = iwlagn_txq_ctx_activate_free(trans); @@ -553,117 +562,39 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, return -ENXIO; } - spin_lock_irqsave(&trans->shrd->sta_lock, flags); - tid_data = &trans->shrd->tid_data[sta_id][tid]; - *ssn = SEQ_TO_SN(tid_data->seq_number); - tid_data->agg.txq_id = txq_id; + trans_pcie->agg_txq[sta_id][tid] = txq_id; iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id); - tid_data = &trans->shrd->tid_data[sta_id][tid]; - if (tid_data->tfds_in_queue == 0) { - IWL_DEBUG_HT(trans, "HW queue is empty\n"); - tid_data->agg.state = IWL_AGG_ON; - iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid); - } else { - IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW" - "queue\n", tid_data->tfds_in_queue); - tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; - } - spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); - return 0; } -void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - iwlagn_tx_queue_stop_scheduler(trans, txq_id); - - iwl_clear_bits_prph(bus(trans), 
SCD_AGGR_SEL, (1 << txq_id)); - - trans_pcie->txq[txq_id].q.read_ptr = 0; - trans_pcie->txq[txq_id].q.write_ptr = 0; - /* supposes that ssn_idx is valid (!= 0xFFF) */ - iwl_trans_set_wr_ptrs(trans, txq_id, 0); - - iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); - iwl_txq_ctx_deactivate(trans_pcie, txq_id); - iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); -} - -int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid) +int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; - int read_ptr, write_ptr; - struct iwl_tid_data *tid_data; - int txq_id; - - spin_lock_irqsave(&trans->shrd->sta_lock, flags); + u8 txq_id = trans_pcie->agg_txq[sta_id][tid]; - tid_data = &trans->shrd->tid_data[sta_id][tid]; - txq_id = tid_data->agg.txq_id; - - if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || - (IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues <= txq_id)) { + if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) { IWL_ERR(trans, "queue number out of range: %d, must be %d to %d\n", txq_id, IWLAGN_FIRST_AMPDU_QUEUE, IWLAGN_FIRST_AMPDU_QUEUE + hw_params(trans).num_ampdu_queues - 1); - spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); return -EINVAL; } - switch (trans->shrd->tid_data[sta_id][tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* - * This can happen if the peer stops aggregation - * again before we've had a chance to drain the - * queue we selected previously, i.e. before the - * session was really started completely. - */ - IWL_DEBUG_HT(trans, "AGG stop before setup done\n"); - goto turn_off; - case IWL_AGG_ON: - break; - default: - IWL_WARN(trans, "Stopping AGG while state not ON " - "or starting for %d on %d (%d)\n", sta_id, tid, - trans->shrd->tid_data[sta_id][tid].agg.state); - spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); - return 0; - } - - write_ptr = trans_pcie->txq[txq_id].q.write_ptr; - read_ptr = trans_pcie->txq[txq_id].q.read_ptr; - - /* The queue is not empty */ - if (write_ptr != read_ptr) { - IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n"); - trans->shrd->tid_data[sta_id][tid].agg.state = - IWL_EMPTYING_HW_QUEUE_DELBA; - spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); - return 0; - } - - IWL_DEBUG_HT(trans, "HW queue is empty\n"); -turn_off: - trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; - - /* do not restore/save irqs */ - spin_unlock(&trans->shrd->sta_lock); - spin_lock(&trans->shrd->lock); - - iwl_trans_pcie_txq_agg_disable(trans, txq_id); + iwlagn_tx_queue_stop_scheduler(trans, txq_id); - spin_unlock_irqrestore(&trans->shrd->lock, flags); + iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id)); - iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid); + trans_pcie->agg_txq[sta_id][tid] = 0; + trans_pcie->txq[txq_id].q.read_ptr = 0; + trans_pcie->txq[txq_id].q.write_ptr = 0; + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl_trans_set_wr_ptrs(trans, txq_id, 0); + iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(trans_pcie, txq_id); + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); return 0; } @@ -982,7 +913,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) ret = iwl_enqueue_hcmd(trans, cmd); if (ret < 0) { - IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", + 
IWL_DEBUG_QUIET_RFKILL(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); return ret; } @@ -1000,6 +932,20 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", get_cmd_string(cmd->id)); + if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status)) + return -EBUSY; + + + if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) { + IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + return -ECANCELED; + } + if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { + IWL_ERR(trans, "Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + return -EIO; + } set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->id)); @@ -1008,7 +954,8 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); - IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", + IWL_DEBUG_QUIET_RFKILL(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); return ret; } @@ -1022,12 +969,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) &trans_pcie->txq[trans->shrd->cmd_queue]; struct iwl_queue *q = &txq->q; - IWL_ERR(trans, + IWL_DEBUG_QUIET_RFKILL(trans, "Error sending %s: time out after %dms.\n", get_cmd_string(cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - IWL_ERR(trans, + IWL_DEBUG_QUIET_RFKILL(trans, "Current CMD queue read_ptr %d write_ptr %d\n", q->read_ptr, q->write_ptr); @@ -1039,18 +986,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) } } - if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) { - IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n", - get_cmd_string(cmd->id)); - ret = -ECANCELED; - goto fail; - } - if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { - IWL_ERR(trans, "Command %s failed: FW Error\n", - get_cmd_string(cmd->id)); - ret = -EIO; - goto fail; - } if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", get_cmd_string(cmd->id)); @@ -1071,7 +1006,7 @@ cancel: trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; } -fail: + if (cmd->reply_page) { iwl_free_pages(trans->shrd, cmd->reply_page); cmd->reply_page = 0; @@ -1115,9 +1050,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, return 0; } - IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id, - q->read_ptr, index); - if (WARN_ON(!skb_queue_empty(skbs))) return 0; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 5f17ab8e76b..67d6e324e26 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c @@ -88,18 +88,16 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans) return -EINVAL; /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - &rxq->bd_dma, GFP_KERNEL); + rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; - memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE); /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts), - 
&rxq->rb_stts_dma, GFP_KERNEL); + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb_stts; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); return 0; @@ -1044,7 +1042,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id) + u8 sta_id, u8 tid) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -1058,13 +1056,12 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, dma_addr_t txcmd_phys; dma_addr_t scratch_phys; u16 len, firstlen, secondlen; - u16 seq_number = 0; u8 wait_write_ptr = 0; u8 txq_id; - u8 tid = 0; bool is_agg = false; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); + u16 __maybe_unused wifi_seq; /* * Send this frame after DTIM -- there's a special queue @@ -1085,36 +1082,28 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, txq_id = trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)]; - if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - u8 *qc = NULL; - struct iwl_tid_data *tid_data; - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - tid_data = &trans->shrd->tid_data[sta_id][tid]; - - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) - return -1; - - seq_number = tid_data->seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl = hdr->seq_ctrl & - cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); - seq_number += 0x10; - /* aggregation is on for this <sta,tid> */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON); - txq_id = tid_data->agg.txq_id; - is_agg = true; - } + /* aggregation is on for this <sta,tid> */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + WARN_ON(tid >= IWL_MAX_TID_COUNT); + txq_id = trans_pcie->agg_txq[sta_id][tid]; + is_agg = true; } - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - txq = &trans_pcie->txq[txq_id]; q = &txq->q; + /* In AGG mode, the index in the ring must correspond to the WiFi + * sequence number. This is a HW requirements to help the SCD to parse + * the BA. + * Check here that the packets are in the right place on the ring. 
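A worked instance of that constraint: SEQ_TO_SN() extracts the 12-bit 802.11 sequence number from seq_ctrl, and with the usual 256-entry ring the TFD slot is its low byte, so a frame with sequence number 0x1A7 must be handed to the transport exactly when q->write_ptr == 0xA7; the debug-only WARN_ONCE that follows fires if an aggregated frame ever arrives out of that position.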
+ */ +#ifdef CONFIG_IWLWIFI_DEBUG + wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr), + "Q: %d WiFi Seq %d tfdNum %d", + txq_id, wifi_seq, q->write_ptr); +#endif + /* Set up driver data for this TFD */ txq->skbs[q->write_ptr] = skb; txq->cmd[q->write_ptr] = dev_cmd; @@ -1212,13 +1201,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); iwl_txq_update_write_ptr(trans, txq); - if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - trans->shrd->tid_data[sta_id][tid].tfds_in_queue++; - if (!ieee80211_has_morefrags(fc)) - trans->shrd->tid_data[sta_id][tid].seq_number = - seq_number; - } - /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually, @@ -1230,7 +1212,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, txq->need_update = 1; iwl_txq_update_write_ptr(trans, txq); } else { - iwl_stop_queue(trans, txq); + iwl_stop_queue(trans, txq, "Queue is full"); } } return 0; @@ -1267,101 +1249,48 @@ static int iwl_trans_pcie_request_irq(struct iwl_trans *trans) return 0; } -static int iwlagn_txq_check_empty(struct iwl_trans *trans, - int sta_id, u8 tid, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_queue *q = &trans_pcie->txq[txq_id].q; - struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid]; - - lockdep_assert_held(&trans->shrd->sta_lock); - - switch (trans->shrd->tid_data[sta_id][tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_DELBA: - /* We are reclaiming the last packet of the */ - /* aggregated HW queue */ - if ((txq_id == tid_data->agg.txq_id) && - (q->read_ptr == q->write_ptr)) { - IWL_DEBUG_HT(trans, - "HW queue empty: continue DELBA flow\n"); - iwl_trans_pcie_txq_agg_disable(trans, txq_id); - tid_data->agg.state = IWL_AGG_OFF; - iwl_stop_tx_ba_trans_ready(priv(trans), - NUM_IWL_RXON_CTX, - sta_id, tid); - iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); - } - break; - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* We are reclaiming the last packet of the queue */ - if (tid_data->tfds_in_queue == 0) { - IWL_DEBUG_HT(trans, - "HW queue empty: continue ADDBA flow\n"); - tid_data->agg.state = IWL_AGG_ON; - iwl_start_tx_ba_trans_ready(priv(trans), - NUM_IWL_RXON_CTX, - sta_id, tid); - } - break; - default: - break; - } - - return 0; -} - -static void iwl_free_tfds_in_queue(struct iwl_trans *trans, - int sta_id, int tid, int freed) -{ - lockdep_assert_held(&trans->shrd->sta_lock); - - if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed) - trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed; - else { - IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n", - trans->shrd->tid_data[sta_id][tid].tfds_in_queue, - freed); - trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0; - } -} - -static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, +static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, int txq_id, int ssn, u32 status, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; - enum iwl_agg_state agg_state; /* n_bd is usually 256 => n_bd - 1 = 0xff */ int tfd_num = ssn & (txq->q.n_bd - 1); int freed = 0; - bool cond; txq->time_stamp = jiffies; - if (txq->sched_retry) { - agg_state = - 
trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state; - cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA); - } else { - cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX); + if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && + txq_id != trans_pcie->agg_txq[sta_id][tid])) { + /* + * FIXME: this is a uCode bug which needs to be addressed, + * log the information and return for now. + * Since it can possibly happen very often and in order + * not to fill the syslog, don't use IWL_ERR or IWL_WARN + */ + IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " + "agg_txq[sta_id][tid] %d", txq_id, + trans_pcie->agg_txq[sta_id][tid]); + return 1; } if (txq->q.read_ptr != tfd_num) { - IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim " - "scd_ssn=%d idx=%d txq=%d swq=%d\n", - ssn , tfd_num, txq_id, txq->swq_id); + IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", + txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, + tfd_num, ssn); freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); - if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond) - iwl_wake_queue(trans, txq); + if (iwl_queue_space(&txq->q) > txq->q.low_mark && + (!txq->sched_retry || + status != TX_STATUS_FAIL_PASSIVE_NO_RX)) + iwl_wake_queue(trans, txq, "Packets reclaimed"); } - - iwl_free_tfds_in_queue(trans, sta_id, tid, freed); - iwlagn_txq_check_empty(trans, sta_id, tid, txq_id); + return 0; } static void iwl_trans_pcie_free(struct iwl_trans *trans) { + iwl_calib_free_results(trans); iwl_trans_pcie_tx_free(trans); iwl_trans_pcie_rx_free(trans); free_irq(bus(trans)->irq, trans); @@ -1417,7 +1346,8 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans) #endif /* CONFIG_PM_SLEEP */ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx) + enum iwl_rxon_context_id ctx, + const char *msg) { u8 ac, txq_id; struct iwl_trans_pcie *trans_pcie = @@ -1425,11 +1355,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, for (ac = 0; ac < AC_NUM; ac++) { txq_id = trans_pcie->ac_to_queue[ctx][ac]; - IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n", + IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n", ac, (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0) ?
"stopped" : "awake"); - iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); + iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg); } } @@ -1452,11 +1382,12 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd) return iwl_trans; } -static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id) +static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id, + const char *msg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - iwl_stop_queue(trans, &trans_pcie->txq[txq_id]); + iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg); } #define IWL_FLUSH_WAIT_MS 2000 @@ -1511,8 +1442,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) if (time_after(jiffies, timeout)) { IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, hw_params(trans).wd_timeout); - IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n", + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", q->read_ptr, q->write_ptr); + IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", + iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt)) + & (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt))); return 1; } diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index c5923125c3f..e6bf3f55477 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -171,32 +171,31 @@ struct iwl_trans_ops { void (*tx_start)(struct iwl_trans *trans); void (*wake_any_queue)(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx); + enum iwl_rxon_context_id ctx, + const char *msg); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id); - void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, + u8 sta_id, u8 tid); + int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, int txq_id, int ssn, u32 status, struct sk_buff_head *skbs); int (*tx_agg_disable)(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid); + int sta_id, int tid); int (*tx_agg_alloc)(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, int tid, - u16 *ssn); + int sta_id, int tid); void (*tx_agg_setup)(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, int sta_id, int tid, - int frame_limit); + int frame_limit, u16 ssn); void (*kick_nic)(struct iwl_trans *trans); void (*free)(struct iwl_trans *trans); - void (*stop_queue)(struct iwl_trans *trans, int q); + void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg); int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); int (*check_stuck_queue)(struct iwl_trans *trans, int q); @@ -207,17 +206,54 @@ struct iwl_trans_ops { #endif }; +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + dma_addr_t p_addr; /* hardware address */ + void *v_addr; /* software address */ + u32 len; /* size in bytes */ +}; + +struct fw_img { + struct fw_desc code; /* firmware code image */ + struct fw_desc data; /* firmware data image */ +}; + +/* Opaque calibration results */ +struct iwl_calib_result { + struct list_head list; + size_t cmd_len; + struct iwl_calib_hdr hdr; + /* data follows */ +}; + /** * struct iwl_trans - transport common data * @ops - pointer to iwl_trans_ops * @shrd - pointer to iwl_shared which holds shared data from the upper layer * @hcmd_lock: protects HCMD + * @ucode_write_complete: indicates 
that the ucode has been copied. + * @ucode_rt: run time ucode image + * @ucode_init: init ucode image + * @ucode_wowlan: wake on wireless ucode image (optional) + * @nvm_device_type: indicates OTP or eeprom + * @calib_results: list head for init calibration results */ struct iwl_trans { const struct iwl_trans_ops *ops; struct iwl_shared *shrd; spinlock_t hcmd_lock; + u8 ucode_write_complete; /* the image write is complete */ + struct fw_img ucode_rt; + struct fw_img ucode_init; + struct fw_img ucode_wowlan; + + /* eeprom related variables */ + int nvm_device_type; + + /* init calibration results */ + struct list_head calib_results; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __attribute__((__aligned__(sizeof(void *)))); @@ -249,9 +285,10 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans) } static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx) + enum iwl_rxon_context_id ctx, + const char *msg) { - trans->ops->wake_any_queue(trans, ctx); + trans->ops->wake_any_queue(trans, ctx, msg); } @@ -266,39 +303,38 @@ int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id) + u8 sta_id, u8 tid) { - return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id); + return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid); } -static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id, +static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id, int tid, int txq_id, int ssn, u32 status, struct sk_buff_head *skbs) { - trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs); + return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, + status, skbs); } static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, int tid) { - return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid); + return trans->ops->tx_agg_disable(trans, sta_id, tid); } static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, - int sta_id, int tid, u16 *ssn) + int sta_id, int tid) { - return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn); + return trans->ops->tx_agg_alloc(trans, sta_id, tid); } static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, int sta_id, int tid, - int frame_limit) + int frame_limit, u16 ssn) { - trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit); + trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn); } static inline void iwl_trans_kick_nic(struct iwl_trans *trans) @@ -311,9 +347,10 @@ static inline void iwl_trans_free(struct iwl_trans *trans) trans->ops->free(trans); } -static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q) +static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q, + const char *msg) { - trans->ops->stop_queue(trans, q); + trans->ops->stop_queue(trans, q, msg); } static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) @@ -348,4 +385,13 @@ static inline int iwl_trans_resume(struct iwl_trans *trans) ******************************************************/ extern const struct iwl_trans_ops trans_ops_pcie; +int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc, + const void *data, size_t len); +void iwl_dealloc_ucode(struct iwl_trans 
*trans); + +int iwl_send_calib_results(struct iwl_trans *trans); +int iwl_calib_set(struct iwl_trans *trans, + const struct iwl_calib_hdr *cmd, int len); +void iwl_calib_free_results(struct iwl_trans *trans); + #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c index 8ba0dd54e37..36a1b5b2585 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c @@ -31,7 +31,9 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/dma-mapping.h> +#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" @@ -72,51 +74,98 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS} }; +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(bus->dev, desc->len, + desc->v_addr, desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img) +{ + iwl_free_fw_desc(bus, &img->code); + iwl_free_fw_desc(bus, &img->data); +} + +void iwl_dealloc_ucode(struct iwl_trans *trans) +{ + iwl_free_fw_img(bus(trans), &trans->ucode_rt); + iwl_free_fw_img(bus(trans), &trans->ucode_init); + iwl_free_fw_img(bus(trans), &trans->ucode_wowlan); +} + +int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc, + const void *data, size_t len) +{ + if (!len) { + desc->v_addr = NULL; + return -EINVAL; + } + + desc->v_addr = dma_alloc_coherent(bus->dev, len, + &desc->p_addr, GFP_KERNEL); + if (!desc->v_addr) + return -ENOMEM; + + desc->len = len; + memcpy(desc->v_addr, data, len); + return 0; +} + /* * ucode */ -static int iwlagn_load_section(struct iwl_priv *priv, const char *name, +static int iwl_load_section(struct iwl_trans *trans, const char *name, struct fw_desc *image, u32 dst_addr) { + struct iwl_bus *bus = bus(trans); dma_addr_t phy_addr = image->p_addr; u32 byte_cnt = image->len; int ret; - priv->ucode_write_complete = 0; + trans->ucode_write_complete = 0; - iwl_write_direct32(bus(priv), + iwl_write_direct32(bus, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); - iwl_write_direct32(bus(priv), + iwl_write_direct32(bus, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); - iwl_write_direct32(bus(priv), + iwl_write_direct32(bus, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); - iwl_write_direct32(bus(priv), + iwl_write_direct32(bus, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), (iwl_get_dma_hi_addr(phy_addr) << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); - iwl_write_direct32(bus(priv), + iwl_write_direct32(bus, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); - iwl_write_direct32(bus(priv), + iwl_write_direct32(bus, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); - IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name); - ret = wait_event_timeout(priv->shrd->wait_command_queue, - priv->ucode_write_complete, 5 * 
HZ); + IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name); + ret = wait_event_timeout(trans->shrd->wait_command_queue, + trans->ucode_write_complete, 5 * HZ); if (!ret) { - IWL_ERR(priv, "Could not load the %s uCode section\n", + IWL_ERR(trans, "Could not load the %s uCode section\n", name); return -ETIMEDOUT; } @@ -124,41 +173,65 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name, return 0; } -static int iwlagn_load_given_ucode(struct iwl_priv *priv, - struct fw_img *image) +static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans, + enum iwl_ucode_type ucode_type) +{ + switch (ucode_type) { + case IWL_UCODE_INIT: + return &trans->ucode_init; + case IWL_UCODE_WOWLAN: + return &trans->ucode_wowlan; + case IWL_UCODE_REGULAR: + return &trans->ucode_rt; + case IWL_UCODE_NONE: + break; + } + return NULL; +} + +static int iwl_load_given_ucode(struct iwl_trans *trans, + enum iwl_ucode_type ucode_type) { int ret = 0; + struct fw_img *image = iwl_get_ucode_image(trans, ucode_type); - ret = iwlagn_load_section(priv, "INST", &image->code, + + if (!image) { + IWL_ERR(trans, "Invalid ucode requested (%d)\n", + ucode_type); + return -EINVAL; + } + + ret = iwl_load_section(trans, "INST", &image->code, IWLAGN_RTC_INST_LOWER_BOUND); if (ret) return ret; - return iwlagn_load_section(priv, "DATA", &image->data, + return iwl_load_section(trans, "DATA", &image->data, IWLAGN_RTC_DATA_LOWER_BOUND); } /* * Calibration */ -static int iwlagn_set_Xtal_calib(struct iwl_priv *priv) +static int iwl_set_Xtal_calib(struct iwl_trans *trans) { struct iwl_calib_xtal_freq_cmd cmd; __le16 *xtal_calib = - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL); + (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); - return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], - (u8 *)&cmd, sizeof(cmd)); + return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd)); } -static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) +static int iwl_set_temperature_offset_calib(struct iwl_trans *trans) { struct iwl_calib_temperature_offset_cmd cmd; __le16 *offset_calib = - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(trans->shrd, + EEPROM_RAW_TEMPERATURE); memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); @@ -166,49 +239,48 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) if (!(cmd.radio_sensor_offset)) cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; - IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n", + IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n", le16_to_cpu(cmd.radio_sensor_offset)); - return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], - (u8 *)&cmd, sizeof(cmd)); + return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd)); } -static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv) +static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans) { struct iwl_calib_temperature_offset_v2_cmd cmd; - __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv, + __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_KELVIN_TEMPERATURE); __le16 *offset_calib_low = - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(trans->shrd, + EEPROM_RAW_TEMPERATURE); struct iwl_eeprom_calib_hdr *hdr; 
memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); - hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd, EEPROM_CALIB_ALL); memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, sizeof(*offset_calib_high)); memcpy(&cmd.radio_sensor_offset_low, offset_calib_low, sizeof(*offset_calib_low)); if (!(cmd.radio_sensor_offset_low)) { - IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); + IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n"); cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; } memcpy(&cmd.burntVoltageRef, &hdr->voltage, sizeof(hdr->voltage)); - IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", + IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n", le16_to_cpu(cmd.radio_sensor_offset_high)); - IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n", + IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n", le16_to_cpu(cmd.radio_sensor_offset_low)); - IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n", + IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n", le16_to_cpu(cmd.burntVoltageRef)); - return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], - (u8 *)&cmd, sizeof(cmd)); + return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd)); } -static int iwlagn_send_calib_cfg(struct iwl_priv *priv) +static int iwl_send_calib_cfg(struct iwl_trans *trans) { struct iwl_calib_cfg_cmd calib_cfg_cmd; struct iwl_host_cmd cmd = { @@ -224,7 +296,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv) calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; - return iwl_trans_send_cmd(trans(priv), &cmd); + return iwl_trans_send_cmd(trans, &cmd); } int iwlagn_rx_calib_result(struct iwl_priv *priv, @@ -234,60 +306,37 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - int index; /* reduce the size of the length field itself */ len -= 4; - /* Define the order in which the results will be sent to the runtime - * uCode. iwl_send_calib_results sends them in a row according to - * their index. We sort them here - */ - switch (hdr->op_code) { - case IWL_PHY_CALIBRATE_DC_CMD: - index = IWL_CALIB_DC; - break; - case IWL_PHY_CALIBRATE_LO_CMD: - index = IWL_CALIB_LO; - break; - case IWL_PHY_CALIBRATE_TX_IQ_CMD: - index = IWL_CALIB_TX_IQ; - break; - case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD: - index = IWL_CALIB_TX_IQ_PERD; - break; - case IWL_PHY_CALIBRATE_BASE_BAND_CMD: - index = IWL_CALIB_BASE_BAND; - break; - default: - IWL_ERR(priv, "Unknown calibration notification %d\n", - hdr->op_code); - return -1; - } - iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); + if (iwl_calib_set(trans(priv), hdr, len)) + IWL_ERR(priv, "Failed to record calibration data %d\n", + hdr->op_code); + return 0; } -int iwlagn_init_alive_start(struct iwl_priv *priv) +int iwl_init_alive_start(struct iwl_trans *trans) { int ret; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (cfg(trans)->bt_params && + cfg(trans)->bt_params->advanced_bt_coexist) { /* * Tell uCode we are ready to perform calibration * need to perform this before any calibration * no need to close the envlope since we are going * to load the runtime uCode later. 
*/ - ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, + ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; } - ret = iwlagn_send_calib_cfg(priv); + ret = iwl_send_calib_cfg(trans); if (ret) return ret; @@ -295,21 +344,21 @@ int iwlagn_init_alive_start(struct iwl_priv *priv) * temperature offset calibration is only needed for runtime ucode, * so prepare the value now. */ - if (priv->cfg->need_temp_offset_calib) { - if (priv->cfg->temp_offset_v2) - return iwlagn_set_temperature_offset_calib_v2(priv); + if (cfg(trans)->need_temp_offset_calib) { + if (cfg(trans)->temp_offset_v2) + return iwl_set_temperature_offset_calib_v2(trans); else - return iwlagn_set_temperature_offset_calib(priv); + return iwl_set_temperature_offset_calib(trans); } return 0; } -static int iwlagn_send_wimax_coex(struct iwl_priv *priv) +static int iwl_send_wimax_coex(struct iwl_trans *trans) { struct iwl_wimax_coex_cmd coex_cmd; - if (priv->cfg->base_params->support_wimax_coexist) { + if (cfg(trans)->base_params->support_wimax_coexist) { /* UnMask wake up src at associated sleep */ coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK; @@ -328,12 +377,12 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv) /* coexistence is disabled */ memset(&coex_cmd, 0, sizeof(coex_cmd)); } - return iwl_trans_send_cmd_pdu(trans(priv), + return iwl_trans_send_cmd_pdu(trans, COEX_PRIORITY_TABLE_CMD, CMD_SYNC, sizeof(coex_cmd), &coex_cmd); } -static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { +static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | @@ -355,61 +404,64 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { 0, 0, 0, 0, 0, 0, 0 }; -void iwlagn_send_prio_tbl(struct iwl_priv *priv) +void iwl_send_prio_tbl(struct iwl_trans *trans) { struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd; - memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl, - sizeof(iwlagn_bt_prio_tbl)); - if (iwl_trans_send_cmd_pdu(trans(priv), + memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl, + sizeof(iwl_bt_prio_tbl)); + if (iwl_trans_send_cmd_pdu(trans, REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC, sizeof(prio_tbl_cmd), &prio_tbl_cmd)) - IWL_ERR(priv, "failed to send BT prio tbl command\n"); + IWL_ERR(trans, "failed to send BT prio tbl command\n"); } -int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) +int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type) { struct iwl_bt_coex_prot_env_cmd env_cmd; int ret; env_cmd.action = action; env_cmd.type = type; - ret = iwl_trans_send_cmd_pdu(trans(priv), + ret = iwl_trans_send_cmd_pdu(trans, REPLY_BT_COEX_PROT_ENV, CMD_SYNC, sizeof(env_cmd), &env_cmd); if (ret) - IWL_ERR(priv, "failed to send BT env command\n"); + IWL_ERR(trans, "failed to send BT env command\n"); return ret; } -static int iwlagn_alive_notify(struct iwl_priv *priv) +static int iwl_alive_notify(struct iwl_trans *trans) { + struct iwl_priv *priv = priv(trans); struct iwl_rxon_context *ctx; int ret; if (!priv->tx_cmd_pool) priv->tx_cmd_pool = - kmem_cache_create("iwlagn_dev_cmd", + kmem_cache_create("iwl_dev_cmd", sizeof(struct iwl_device_cmd), sizeof(void *), 0, NULL); if (!priv->tx_cmd_pool) return -ENOMEM; - iwl_trans_tx_start(trans(priv)); + iwl_trans_tx_start(trans); for_each_context(priv, ctx) ctx->last_tx_rejected = false; - ret = iwlagn_send_wimax_coex(priv); + ret = 
iwl_send_wimax_coex(trans); if (ret) return ret; - ret = iwlagn_set_Xtal_calib(priv); - if (ret) - return ret; + if (!cfg(priv)->no_xtal_calib) { + ret = iwl_set_Xtal_calib(trans); + if (ret) + return ret; + } - return iwl_send_calib_results(priv); + return iwl_send_calib_results(trans); } @@ -418,7 +470,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv) * using sample data 100 bytes apart. If these sample points are good, * it's a pretty good bet that everything between them is good, too. */ -static int iwl_verify_inst_sparse(struct iwl_priv *priv, +static int iwl_verify_inst_sparse(struct iwl_bus *bus, struct fw_desc *fw_desc) { __le32 *image = (__le32 *)fw_desc->v_addr; @@ -426,15 +478,15 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv, u32 val; u32 i; - IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); + IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len); for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IWL_DL_IO is set */ - iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR, + iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR, i + IWLAGN_RTC_INST_LOWER_BOUND); - val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); + val = iwl_read32(bus, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) return -EIO; } @@ -442,7 +494,7 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv, return 0; } -static void iwl_print_mismatch_inst(struct iwl_priv *priv, +static void iwl_print_mismatch_inst(struct iwl_bus *bus, struct fw_desc *fw_desc) { __le32 *image = (__le32 *)fw_desc->v_addr; @@ -451,18 +503,18 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv, u32 offs; int errors = 0; - IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); + IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len); - iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR, + iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR, IWLAGN_RTC_INST_LOWER_BOUND); for (offs = 0; offs < len && errors < 20; offs += sizeof(u32), image++) { /* read data comes through single port, auto-incr addr */ - val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); + val = iwl_read32(bus, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "uCode INST section at " + IWL_ERR(bus, "uCode INST section at " "offset 0x%x, is 0x%x, s/b 0x%x\n", offs, val, le32_to_cpu(*image)); errors++; @@ -474,91 +526,163 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv, * iwl_verify_ucode - determine which instruction image is in SRAM, * and verify its contents */ -static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img) +static int iwl_verify_ucode(struct iwl_trans *trans, + enum iwl_ucode_type ucode_type) { - if (!iwl_verify_inst_sparse(priv, &img->code)) { - IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n"); + struct fw_img *img = iwl_get_ucode_image(trans, ucode_type); + + if (!img) { + IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type); + return -EINVAL; + } + + if (!iwl_verify_inst_sparse(bus(trans), &img->code)) { + IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n"); return 0; } - IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n"); + IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n"); - iwl_print_mismatch_inst(priv, &img->code); + iwl_print_mismatch_inst(bus(trans), &img->code); return -EIO; } -struct iwlagn_alive_data { +struct iwl_alive_data { bool valid; u8 subtype; }; -static void iwlagn_alive_fn(struct iwl_priv *priv, 
+static void iwl_alive_fn(struct iwl_trans *trans, struct iwl_rx_packet *pkt, void *data) { - struct iwlagn_alive_data *alive_data = data; + struct iwl_alive_data *alive_data = data; struct iwl_alive_resp *palive; palive = &pkt->u.alive_frame; - IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision " + IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", palive->is_valid, palive->ver_type, palive->ver_subtype); - priv->device_pointers.error_event_table = + trans->shrd->device_pointers.error_event_table = le32_to_cpu(palive->error_event_table_ptr); - priv->device_pointers.log_event_table = + trans->shrd->device_pointers.log_event_table = le32_to_cpu(palive->log_event_table_ptr); alive_data->subtype = palive->ver_subtype; alive_data->valid = palive->is_valid == UCODE_VALID_OK; } +/* notification wait support */ +void iwl_init_notification_wait(struct iwl_shared *shrd, + struct iwl_notification_wait *wait_entry, + u8 cmd, + void (*fn)(struct iwl_trans *trans, + struct iwl_rx_packet *pkt, + void *data), + void *fn_data) +{ + wait_entry->fn = fn; + wait_entry->fn_data = fn_data; + wait_entry->cmd = cmd; + wait_entry->triggered = false; + wait_entry->aborted = false; + + spin_lock_bh(&shrd->notif_wait_lock); + list_add(&wait_entry->list, &shrd->notif_waits); + spin_unlock_bh(&shrd->notif_wait_lock); +} + +int iwl_wait_notification(struct iwl_shared *shrd, + struct iwl_notification_wait *wait_entry, + unsigned long timeout) +{ + int ret; + + ret = wait_event_timeout(shrd->notif_waitq, + wait_entry->triggered || wait_entry->aborted, + timeout); + + spin_lock_bh(&shrd->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(&shrd->notif_wait_lock); + + if (wait_entry->aborted) + return -EIO; + + /* return value is always >= 0 */ + if (ret <= 0) + return -ETIMEDOUT; + return 0; +} + +void iwl_remove_notification(struct iwl_shared *shrd, + struct iwl_notification_wait *wait_entry) +{ + spin_lock_bh(&shrd->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(&shrd->notif_wait_lock); +} + +void iwl_abort_notification_waits(struct iwl_shared *shrd) +{ + unsigned long flags; + struct iwl_notification_wait *wait_entry; + + spin_lock_irqsave(&shrd->notif_wait_lock, flags); + list_for_each_entry(wait_entry, &shrd->notif_waits, list) + wait_entry->aborted = true; + spin_unlock_irqrestore(&shrd->notif_wait_lock, flags); + + wake_up_all(&shrd->notif_waitq); +} + #define UCODE_ALIVE_TIMEOUT HZ #define UCODE_CALIB_TIMEOUT (2*HZ) -int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, - struct fw_img *image, - enum iwlagn_ucode_type ucode_type) +int iwl_load_ucode_wait_alive(struct iwl_trans *trans, + enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; - struct iwlagn_alive_data alive_data; + struct iwl_alive_data alive_data; int ret; - enum iwlagn_ucode_type old_type; + enum iwl_ucode_type old_type; - ret = iwl_trans_start_device(trans(priv)); + ret = iwl_trans_start_device(trans); if (ret) return ret; - iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE, - iwlagn_alive_fn, &alive_data); + iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE, + iwl_alive_fn, &alive_data); - old_type = priv->ucode_type; - priv->ucode_type = ucode_type; + old_type = trans->shrd->ucode_type; + trans->shrd->ucode_type = ucode_type; - ret = iwlagn_load_given_ucode(priv, image); + ret = iwl_load_given_ucode(trans, ucode_type); if (ret) { - priv->ucode_type = old_type; - iwlagn_remove_notification(priv, &alive_wait); + trans->shrd->ucode_type 
= old_type; + iwl_remove_notification(trans->shrd, &alive_wait); return ret; } - iwl_trans_kick_nic(trans(priv)); + iwl_trans_kick_nic(trans); /* * Some things may run in the background now, but we * just wait for the ALIVE notification here. */ - ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT); + ret = iwl_wait_notification(trans->shrd, &alive_wait, + UCODE_ALIVE_TIMEOUT); if (ret) { - priv->ucode_type = old_type; + trans->shrd->ucode_type = old_type; return ret; } if (!alive_data.valid) { - IWL_ERR(priv, "Loaded ucode is not valid!\n"); - priv->ucode_type = old_type; + IWL_ERR(trans, "Loaded ucode is not valid!\n"); + trans->shrd->ucode_type = old_type; return -EIO; } @@ -568,9 +692,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, * skip it for WoWLAN. */ if (ucode_type != IWL_UCODE_WOWLAN) { - ret = iwl_verify_ucode(priv, image); + ret = iwl_verify_ucode(trans, ucode_type); if (ret) { - priv->ucode_type = old_type; + trans->shrd->ucode_type = old_type; return ret; } @@ -578,42 +702,41 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, msleep(5); } - ret = iwlagn_alive_notify(priv); + ret = iwl_alive_notify(trans); if (ret) { - IWL_WARN(priv, + IWL_WARN(trans, "Could not complete ALIVE transition: %d\n", ret); - priv->ucode_type = old_type; + trans->shrd->ucode_type = old_type; return ret; } return 0; } -int iwlagn_run_init_ucode(struct iwl_priv *priv) +int iwl_run_init_ucode(struct iwl_trans *trans) { struct iwl_notification_wait calib_wait; int ret; - lockdep_assert_held(&priv->shrd->mutex); + lockdep_assert_held(&trans->shrd->mutex); /* No init ucode required? Curious, but maybe ok */ - if (!priv->ucode_init.code.len) + if (!trans->ucode_init.code.len) return 0; - if (priv->ucode_type != IWL_UCODE_NONE) + if (trans->shrd->ucode_type != IWL_UCODE_NONE) return 0; - iwlagn_init_notification_wait(priv, &calib_wait, + iwl_init_notification_wait(trans->shrd, &calib_wait, CALIBRATION_COMPLETE_NOTIFICATION, NULL, NULL); /* Will also start the device */ - ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, - IWL_UCODE_INIT); + ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT); if (ret) goto error; - ret = iwlagn_init_alive_start(priv); + ret = iwl_init_alive_start(trans); if (ret) goto error; @@ -621,14 +744,15 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv) * Some things may run in the background now, but we * just wait for the calibration complete notification. */ - ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT); + ret = iwl_wait_notification(trans->shrd, &calib_wait, + UCODE_CALIB_TIMEOUT); goto out; error: - iwlagn_remove_notification(priv, &calib_wait); + iwl_remove_notification(trans->shrd, &calib_wait); out: /* Whatever happened, stop the device */ - iwl_trans_stop_device(trans(priv)); + iwl_trans_stop_device(trans); return ret; } diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlwifi/iwl-wifi.h index f46c80e6e00..18501101a53 100644 --- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h +++ b/drivers/net/wireless/iwlwifi/iwl-wifi.h @@ -59,17 +59,16 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ -#ifndef __iwl_4965_calib_h__ -#define __iwl_4965_calib_h__ -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-commands.h" +#ifndef __iwl_wifi_h__ +#define __iwl_wifi_h__ -void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp); -void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp); -void iwl4965_init_sensitivity(struct iwl_priv *priv); -void iwl4965_reset_run_time_calib(struct iwl_priv *priv); -void iwl4965_calib_free_results(struct iwl_priv *priv); +#include "iwl-shared.h" -#endif /* __iwl_4965_calib_h__ */ +int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type); +void iwl_send_prio_tbl(struct iwl_trans *trans); +int iwl_init_alive_start(struct iwl_trans *trans); +int iwl_run_init_ucode(struct iwl_trans *trans); +int iwl_load_ucode_wait_alive(struct iwl_trans *trans, + enum iwl_ucode_type ucode_type); +#endif /* __iwl_wifi_h__ */ diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c index c42be81e979..48e8218fd23 100644 --- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c +++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -165,11 +165,15 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, struct key_params *params) { struct iwm_priv *iwm = ndev_to_iwm(ndev); - struct iwm_key *key = &iwm->keys[key_index]; + struct iwm_key *key; int ret; IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr); + if (key_index >= IWM_NUM_KEYS) + return -ENOENT; + + key = &iwm->keys[key_index]; memset(key, 0, sizeof(struct iwm_key)); ret = iwm_key_init(key, key_index, mac_addr, params); if (ret < 0) { @@ -214,8 +218,12 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { struct iwm_priv *iwm = ndev_to_iwm(ndev); - struct iwm_key *key = &iwm->keys[key_index]; + struct iwm_key *key; + if (key_index >= IWM_NUM_KEYS) + return -ENOENT; + + key = &iwm->keys[key_index]; if (!iwm->keys[key_index].key_len) { IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index); return 0; @@ -236,6 +244,9 @@ static int iwm_cfg80211_set_default_key(struct wiphy *wiphy, IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index); + if (key_index >= IWM_NUM_KEYS) + return -ENOENT; + if (!iwm->keys[key_index].key_len) { IWM_ERR(iwm, "Key %d not used\n", key_index); return -EINVAL; diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c index 98a179f98ea..1f868b166d1 100644 --- a/drivers/net/wireless/iwmc3200wifi/main.c +++ b/drivers/net/wireless/iwmc3200wifi/main.c @@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = { .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03}, }; -static int modparam_reset; +static bool modparam_reset; module_param_named(reset, modparam_reset, bool, 0644); MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])"); -static int modparam_wimax_enable = 1; +static bool modparam_wimax_enable = true; module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644); MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])"); @@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work) iwm_invalidate_mlme_profile(iwm); clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); - iwm->umac_profile_active = 0; + iwm->umac_profile_active = false; memset(iwm->bssid, 0, ETH_ALEN); iwm->channel = 0; diff --git 
a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index a414768f40f..7d708f4395f 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf, clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status); clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); - iwm->umac_profile_active = 0; + iwm->umac_profile_active = false; memset(iwm->bssid, 0, ETH_ALEN); iwm->channel = 0; @@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf, umac_sta->mac_addr, umac_sta->flags & UMAC_STA_FLAG_QOS); - sta->valid = 1; + sta->valid = true; sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS; sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR); memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN); @@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf, sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)]; if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN)) - sta->valid = 0; + sta->valid = false; break; case UMAC_OPCODE_CLEAR_ALL: for (i = 0; i < IWM_STA_TABLE_NUM; i++) - iwm->sta_table[i].valid = 0; + iwm->sta_table[i].valid = false; break; default: @@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf, switch (hdr->oid) { case UMAC_WIFI_IF_CMD_SET_PROFILE: - iwm->umac_profile_active = 1; + iwm->umac_profile_active = true; break; default: break; @@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf, */ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending) if (cmd->seq_num == seq_num) { - cmd->resp_received = 1; + cmd->resp_received = true; cmd->buf.len = buf_size; memcpy(cmd->buf.hdr, buf, buf_size); wake_up_interruptible(&iwm->nonwifi_queue); diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index a7f1ab28940..a7cd311cb1b 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -485,6 +485,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy, static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, struct cmd_header *resp) { + struct cfg80211_bss *bss; struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; int bsssize; const u8 *pos; @@ -632,12 +633,14 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, LBS_SCAN_RSSI_TO_MBM(rssi)/100); if (channel && - !(channel->flags & IEEE80211_CHAN_DISABLED)) - cfg80211_inform_bss(wiphy, channel, + !(channel->flags & IEEE80211_CHAN_DISABLED)) { + bss = cfg80211_inform_bss(wiphy, channel, bssid, get_unaligned_le64(tsfdesc), capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), GFP_KERNEL); + cfg80211_put_bss(bss); + } } else lbs_deb_scan("scan response: missing BSS channel IE\n"); @@ -728,9 +731,11 @@ static void lbs_scan_worker(struct work_struct *work) le16_to_cpu(scan_cmd->hdr.size), lbs_ret_scan, 0); - if (priv->scan_channel >= priv->scan_req->n_channels) + if (priv->scan_channel >= priv->scan_req->n_channels) { /* Mark scan done */ + cancel_delayed_work(&priv->scan_work); lbs_scan_done(priv); + } /* Restart network */ if (carrier) @@ -759,12 +764,12 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal, request->n_ssids, request->n_channels, request->ie_len); priv->scan_channel = 0; - queue_delayed_work(priv->work_thread, &priv->scan_work, - msecs_to_jiffies(50)); - priv->scan_req = request; priv->internal_scan = internal; + 
queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(50)); + lbs_deb_leave(LBS_DEB_CFG80211); } @@ -1720,6 +1725,7 @@ static void lbs_join_post(struct lbs_private *priv, 2 + 2 + /* atim */ 2 + 8]; /* extended rates */ u8 *fake = fake_ie; + struct cfg80211_bss *bss; lbs_deb_enter(LBS_DEB_CFG80211); @@ -1763,14 +1769,15 @@ static void lbs_join_post(struct lbs_private *priv, *fake++ = 0x6c; lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie); - cfg80211_inform_bss(priv->wdev->wiphy, - params->channel, - bssid, - 0, - capability, - params->beacon_interval, - fake_ie, fake - fake_ie, - 0, GFP_KERNEL); + bss = cfg80211_inform_bss(priv->wdev->wiphy, + params->channel, + bssid, + 0, + capability, + params->beacon_interval, + fake_ie, fake - fake_ie, + 0, GFP_KERNEL); + cfg80211_put_bss(bss); memcpy(priv->wdev->ssid, params->ssid, params->ssid_len); priv->wdev->ssid_len = params->ssid_len; diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index 885ddc1c4fe..f955b2d66ed 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c @@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev, { struct lbs_private *priv = dev->ml_priv; - snprintf(info->fw_version, 32, "%u.%u.%u.p%u", + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.p%u", priv->fwrelease >> 24 & 0xff, priv->fwrelease >> 16 & 0xff, priv->fwrelease >> 8 & 0xff, priv->fwrelease & 0xff); - strcpy(info->driver, "libertas"); - strcpy(info->version, lbs_driver_version); + strlcpy(info->driver, "libertas", sizeof(info->driver)); + strlcpy(info->version, lbs_driver_version, sizeof(info->version)); } /* diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index e2693517986..3f7bf4d912b 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) * Most of the libertas cards can do unaligned register access, but some * weird ones cannot. That's especially true for the CF8305 card. 
*/ - card->align_regs = 0; + card->align_regs = false; card->model = get_model(p_dev->manf_id, p_dev->card_id); if (card->model == MODEL_UNKNOWN) { @@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) /* Check if we have a current silicon */ prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID); if (card->model == MODEL_8305) { - card->align_regs = 1; + card->align_regs = true; if (prod_id < IF_CS_CF8305_B1_REV) { pr_err("8305 rev B0 and older are not supported\n"); ret = -ENODEV; diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 728baa44525..50b1ee7721e 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -1291,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = { .remove = __devexit_p(libertas_spi_remove), .driver = { .name = "libertas_spi", - .bus = &spi_bus_type, .owner = THIS_MODULE, .pm = &if_spi_pm_ops, }, diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index ceb51b6e670..a03457292c8 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv) return; if (skb_queue_empty(&priv->bc_ps_buf)) { - bool tx_buff_bc = 0; + bool tx_buff_bc = false; while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) { skb_queue_tail(&priv->bc_ps_buf, skb); - tx_buff_bc = 1; + tx_buff_bc = true; } if (tx_buff_bc) { ieee80211_stop_queues(priv->hw); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 523ad55a288..4b9e730d2c8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -37,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); -int wmediumd_pid; +static u32 wmediumd_pid; + static int radios = 2; module_param(radios, int, 0444); MODULE_PARM_DESC(radios, "Number of simulated radios"); @@ -665,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { bool ack; struct ieee80211_tx_info *txi; - int _pid; + u32 _pid; mac80211_hwsim_monitor_rx(hw, skb); @@ -676,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) } /* wmediumd mode check */ - _pid = wmediumd_pid; + _pid = ACCESS_ONCE(wmediumd_pid); if (_pid) return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); @@ -707,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; wiphy_debug(hw->wiphy, "%s\n", __func__); - data->started = 1; + data->started = true; return 0; } @@ -715,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw) static void mac80211_hwsim_stop(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; - data->started = 0; + data->started = false; del_timer(&data->beacon_timer); wiphy_debug(hw->wiphy, "%s\n", __func__); } @@ -764,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_hw *hw = arg; struct sk_buff *skb; struct ieee80211_tx_info *info; - int _pid; + u32 _pid; hwsim_check_magic(vif); @@ -781,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, mac80211_hwsim_monitor_rx(hw, skb); /* wmediumd mode check */ - _pid = wmediumd_pid; + _pid = ACCESS_ONCE(wmediumd_pid); if (_pid) return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); @@ -1254,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 
*mac, struct ieee80211_vif *vif) struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_pspoll *pspoll; - int _pid; + u32 _pid; if (!vp->assoc) return; @@ -1275,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) memcpy(pspoll->ta, mac, ETH_ALEN); /* wmediumd mode check */ - _pid = wmediumd_pid; + _pid = ACCESS_ONCE(wmediumd_pid); if (_pid) return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); @@ -1292,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; - int _pid; + u32 _pid; if (!vp->assoc) return; @@ -1314,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr3, vp->bssid, ETH_ALEN); /* wmediumd mode check */ - _pid = wmediumd_pid; + _pid = ACCESS_ONCE(wmediumd_pid); if (_pid) return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); @@ -1634,8 +1635,6 @@ static int hwsim_init_netlink(void) int rc; printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); - wmediumd_pid = 0; - rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops, ARRAY_SIZE(hwsim_ops)); if (rc) @@ -1748,6 +1747,8 @@ static int __init init_mac80211_hwsim(void) IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_AMPDU_AGGREGATION; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); hw->sta_data_size = sizeof(struct hwsim_sta_priv); diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 7aa9aa0ac95..681d3f2a4c2 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -33,7 +33,7 @@ * Since the buffer is linear, the function uses rotation to simulate * circular buffer. */ -static int +static void mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, int start_win) @@ -71,8 +71,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, rx_reor_tbl_ptr->start_win = start_win; spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); - - return 0; } /* @@ -83,7 +81,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, * Since the buffer is linear, the function uses rotation to simulate * circular buffer. 
*/ -static int +static void mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr) { @@ -119,7 +117,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i) &(MAX_TID_VALUE - 1); spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); - return 0; } /* @@ -405,7 +402,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, u8 *ta, u8 pkt_type, void *payload) { struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr; - int start_win, end_win, win_size, ret; + int start_win, end_win, win_size; u16 pkt_index; rx_reor_tbl_ptr = @@ -452,11 +449,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, start_win = (end_win - win_size) + 1; else start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1; - ret = mwifiex_11n_dispatch_pkt_until_start_win(priv, + mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr, start_win); - - if (ret) - return ret; } if (pkt_type != PKT_TYPE_BAR) { @@ -475,9 +469,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, * Dispatch all packets sequentially from start_win until a * hole is found and adjust the start_win appropriately */ - ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr); + mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr); - return ret; + return 0; } /* diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index 8f2797aa0c6..2a078cea830 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -10,12 +10,12 @@ config MWIFIEX mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8787" + tristate "Marvell WiFi-Ex Driver for SD8787/SD8797" depends on MWIFIEX && MMC select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8787 chipset with SDIO interface. + 8787/8797 chipsets with SDIO interface. If you choose to build it as a module, it will be called mwifiex_sdio. diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 462c71067bf..c3b6c4652cd 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -24,50 +24,26 @@ * This function maps the nl802.11 channel type into driver channel type. * * The mapping is as follows - - * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL - * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL - * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE - * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW - * Others -> NO_SEC_CHANNEL + * NL80211_CHAN_NO_HT -> IEEE80211_HT_PARAM_CHA_SEC_NONE + * NL80211_CHAN_HT20 -> IEEE80211_HT_PARAM_CHA_SEC_NONE + * NL80211_CHAN_HT40PLUS -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE + * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW + * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE */ -static int -mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type - channel_type) +static u8 +mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type + channel_type) { switch (channel_type) { case NL80211_CHAN_NO_HT: case NL80211_CHAN_HT20: - return NO_SEC_CHANNEL; + return IEEE80211_HT_PARAM_CHA_SEC_NONE; case NL80211_CHAN_HT40PLUS: - return SEC_CHANNEL_ABOVE; + return IEEE80211_HT_PARAM_CHA_SEC_ABOVE; case NL80211_CHAN_HT40MINUS: - return SEC_CHANNEL_BELOW; - default: - return NO_SEC_CHANNEL; - } -} - -/* - * This function maps the driver channel type into nl802.11 channel type. 
- * - * The mapping is as follows - - * NO_SEC_CHANNEL -> NL80211_CHAN_HT20 - * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS - * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS - * Others -> NL80211_CHAN_HT20 - */ -static enum nl80211_channel_type -mwifiex_channels_to_cfg80211_channel_type(int channel_type) -{ - switch (channel_type) { - case NO_SEC_CHANNEL: - return NL80211_CHAN_HT20; - case SEC_CHANNEL_ABOVE: - return NL80211_CHAN_HT40PLUS; - case SEC_CHANNEL_BELOW: - return NL80211_CHAN_HT40MINUS; + return IEEE80211_HT_PARAM_CHA_SEC_BELOW; default: - return NL80211_CHAN_HT20; + return IEEE80211_HT_PARAM_CHA_SEC_NONE; } } @@ -120,10 +96,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, static int mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, - int dbm) + int mbm) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); struct mwifiex_power_cfg power_cfg; + int dbm = MBM_TO_DBM(mbm); if (type == NL80211_TX_POWER_FIXED) { power_cfg.is_power_auto = 0; @@ -330,37 +307,51 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv, enum nl80211_channel_type channel_type) { struct mwifiex_chan_freq_power cfp; - struct mwifiex_ds_band_cfg band_cfg; u32 config_bands = 0; struct wiphy *wiphy = priv->wdev->wiphy; + struct mwifiex_adapter *adapter = priv->adapter; if (chan) { - memset(&band_cfg, 0, sizeof(band_cfg)); /* Set appropriate bands */ - if (chan->band == IEEE80211_BAND_2GHZ) - config_bands = BAND_B | BAND_G | BAND_GN; - else - config_bands = BAND_AN | BAND_A; - if (priv->bss_mode == NL80211_IFTYPE_STATION - || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) { - band_cfg.config_bands = config_bands; - } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { - band_cfg.config_bands = config_bands; - band_cfg.adhoc_start_band = config_bands; + if (chan->band == IEEE80211_BAND_2GHZ) { + if (channel_type == NL80211_CHAN_NO_HT) + if (priv->adapter->config_bands == BAND_B || + priv->adapter->config_bands == BAND_G) + config_bands = + priv->adapter->config_bands; + else + config_bands = BAND_B | BAND_G; + else + config_bands = BAND_B | BAND_G | BAND_GN; + } else { + if (channel_type == NL80211_CHAN_NO_HT) + config_bands = BAND_A; + else + config_bands = BAND_AN | BAND_A; } - band_cfg.sec_chan_offset = - mwifiex_cfg80211_channel_type_to_mwifiex_channels + if (!((config_bands | adapter->fw_bands) & + ~adapter->fw_bands)) { + adapter->config_bands = config_bands; + if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { + adapter->adhoc_start_band = config_bands; + if ((config_bands & BAND_GN) || + (config_bands & BAND_AN)) + adapter->adhoc_11n_enabled = true; + else + adapter->adhoc_11n_enabled = false; + } + } + adapter->sec_chan_offset = + mwifiex_cfg80211_channel_type_to_sec_chan_offset (channel_type); - - if (mwifiex_set_radio_band_cfg(priv, &band_cfg)) - return -EFAULT; + adapter->channel_type = channel_type; mwifiex_send_domain_info_cmd_fw(wiphy); } wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and " - "mode %d\n", config_bands, band_cfg.sec_chan_offset, + "mode %d\n", config_bands, adapter->sec_chan_offset, priv->bss_mode); if (!chan) return 0; @@ -696,9 +687,9 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { - struct mwifiex_ds_band_cfg band_cfg; struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int index = 0, mode = 0, i; + struct mwifiex_adapter *adapter = priv->adapter; /* Currently only 2.4GHz is supported */ for (i = 0; i < 
mwifiex_band_2ghz.n_bitrates; i++) { @@ -720,16 +711,15 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, mode |= BAND_B; } - memset(&band_cfg, 0, sizeof(band_cfg)); - band_cfg.config_bands = mode; - - if (priv->bss_mode == NL80211_IFTYPE_ADHOC) - band_cfg.adhoc_start_band = mode; - - band_cfg.sec_chan_offset = NO_SEC_CHANNEL; - - if (mwifiex_set_radio_band_cfg(priv, &band_cfg)) - return -EFAULT; + if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) { + adapter->config_bands = mode; + if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { + adapter->adhoc_start_band = mode; + adapter->adhoc_11n_enabled = false; + } + } + adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + adapter->channel_type = NL80211_CHAN_NO_HT; wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n", (mode & BAND_B) ? "b" : "", @@ -750,17 +740,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - if (priv->disconnect) - return -EBUSY; - - priv->disconnect = 1; if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" " reason code %d\n", priv->cfg_bssid, reason_code); - queue_work(priv->workqueue, &priv->cfg_workqueue); + memset(priv->cfg_bssid, 0, ETH_ALEN); return 0; } @@ -780,6 +766,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) { struct ieee80211_channel *chan; struct mwifiex_bss_info bss_info; + struct cfg80211_bss *bss; int ie_len; u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)]; enum ieee80211_band band; @@ -800,9 +787,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) ieee80211_channel_to_frequency(bss_info.bss_chan, band)); - cfg80211_inform_bss(priv->wdev->wiphy, chan, + bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 0, ie_buf, ie_len, 0, GFP_KERNEL); + cfg80211_put_bss(bss); memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN); return 0; @@ -851,8 +839,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid, if (channel) ret = mwifiex_set_rf_channel(priv, channel, - mwifiex_channels_to_cfg80211_channel_type - (priv->adapter->chan_offset)); + priv->adapter->channel_type); ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */ @@ -978,27 +965,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int ret = 0; - if (priv->assoc_request) - return -EBUSY; - if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { wiphy_err(wiphy, "received infra assoc request " "when station is in ibss mode\n"); goto done; } - priv->assoc_request = -EINPROGRESS; - wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", (char *) sme->ssid, sme->bssid); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); - - priv->assoc_request = 1; done: - priv->assoc_result = ret; - queue_work(priv->workqueue, &priv->cfg_workqueue); + if (!ret) { + cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0, + NULL, 0, WLAN_STATUS_SUCCESS, + GFP_KERNEL); + dev_dbg(priv->adapter->dev, + "info: associated to bssid %pM successfully\n", + priv->cfg_bssid); + } else { + dev_dbg(priv->adapter->dev, + "info: association to bssid %pM failed\n", + priv->cfg_bssid); + memset(priv->cfg_bssid, 0, ETH_ALEN); + } + return ret; } @@ -1015,28 +1007,29 @@ 
mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); int ret = 0; - if (priv->ibss_join_request) - return -EBUSY; - if (priv->bss_mode != NL80211_IFTYPE_ADHOC) { wiphy_err(wiphy, "request to join ibss received " "when station is not in ibss mode\n"); goto done; } - priv->ibss_join_request = -EINPROGRESS; - wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", (char *) params->ssid, params->bssid); ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid, params->bssid, priv->bss_mode, params->channel, NULL, params->privacy); - - priv->ibss_join_request = 1; done: - priv->ibss_join_result = ret; - queue_work(priv->workqueue, &priv->cfg_workqueue); + if (!ret) { + cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL); + dev_dbg(priv->adapter->dev, + "info: joined/created adhoc network with bssid" + " %pM successfully\n", priv->cfg_bssid); + } else { + dev_dbg(priv->adapter->dev, + "info: failed creating/joining adhoc network\n"); + } + return ret; } @@ -1051,17 +1044,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); - if (priv->disconnect) - return -EBUSY; - - priv->disconnect = 1; - wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n", priv->cfg_bssid); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; - queue_work(priv->workqueue, &priv->cfg_workqueue); + memset(priv->cfg_bssid, 0, ETH_ALEN); return 0; } @@ -1078,15 +1066,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_scan_request *request) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + int i; + struct ieee80211_channel *chan; wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); - if (priv->scan_request && priv->scan_request != request) - return -EBUSY; - priv->scan_request = request; - queue_work(priv->workqueue, &priv->cfg_workqueue); + priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), + GFP_KERNEL); + if (!priv->user_scan_cfg) { + dev_err(priv->adapter->dev, "failed to alloc scan_req\n"); + return -ENOMEM; + } + for (i = 0; i < request->n_ssids; i++) { + memcpy(priv->user_scan_cfg->ssid_list[i].ssid, + request->ssids[i].ssid, request->ssids[i].ssid_len); + priv->user_scan_cfg->ssid_list[i].max_len = + request->ssids[i].ssid_len; + } + for (i = 0; i < request->n_channels; i++) { + chan = request->channels[i]; + priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; + priv->user_scan_cfg->chan_list[i].radio_type = chan->band; + + if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) + priv->user_scan_cfg->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_PASSIVE; + else + priv->user_scan_cfg->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_ACTIVE; + + priv->user_scan_cfg->chan_list[i].scan_time = 0; + } + if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) + return -EFAULT; + return 0; } @@ -1292,10 +1307,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev) priv->media_connected = false; - cancel_work_sync(&priv->cfg_workqueue); - flush_workqueue(priv->workqueue); - destroy_workqueue(priv->workqueue); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; return 0; @@ -1373,9 +1384,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - /* We are using custom domains */ - wdev->wiphy->flags |= 
WIPHY_FLAG_CUSTOM_REGULATORY; - /* Reserve space for bss band information */ wdev->wiphy->bss_priv_size = sizeof(u8); @@ -1404,100 +1412,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) return ret; } - -/* - * This function handles the result of different pending network operations. - * - * The following operations are handled and CFG802.11 subsystem is - * notified accordingly - - * - Scan request completion - * - Association request completion - * - IBSS join request completion - * - Disconnect request completion - */ -void -mwifiex_cfg80211_results(struct work_struct *work) -{ - struct mwifiex_private *priv = - container_of(work, struct mwifiex_private, cfg_workqueue); - struct mwifiex_user_scan_cfg *scan_req; - int ret = 0, i; - struct ieee80211_channel *chan; - - if (priv->scan_request) { - scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg), - GFP_KERNEL); - if (!scan_req) { - dev_err(priv->adapter->dev, "failed to alloc " - "scan_req\n"); - return; - } - for (i = 0; i < priv->scan_request->n_ssids; i++) { - memcpy(scan_req->ssid_list[i].ssid, - priv->scan_request->ssids[i].ssid, - priv->scan_request->ssids[i].ssid_len); - scan_req->ssid_list[i].max_len = - priv->scan_request->ssids[i].ssid_len; - } - for (i = 0; i < priv->scan_request->n_channels; i++) { - chan = priv->scan_request->channels[i]; - scan_req->chan_list[i].chan_number = chan->hw_value; - scan_req->chan_list[i].radio_type = chan->band; - if (chan->flags & IEEE80211_CHAN_DISABLED) - scan_req->chan_list[i].scan_type = - MWIFIEX_SCAN_TYPE_PASSIVE; - else - scan_req->chan_list[i].scan_type = - MWIFIEX_SCAN_TYPE_ACTIVE; - scan_req->chan_list[i].scan_time = 0; - } - if (mwifiex_set_user_scan_ioctl(priv, scan_req)) - ret = -EFAULT; - priv->scan_result_status = ret; - dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n", - __func__); - cfg80211_scan_done(priv->scan_request, - (priv->scan_result_status < 0)); - priv->scan_request = NULL; - kfree(scan_req); - } - - if (priv->assoc_request == 1) { - if (!priv->assoc_result) { - cfg80211_connect_result(priv->netdev, priv->cfg_bssid, - NULL, 0, NULL, 0, - WLAN_STATUS_SUCCESS, - GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: associated to bssid %pM successfully\n", - priv->cfg_bssid); - } else { - dev_dbg(priv->adapter->dev, - "info: association to bssid %pM failed\n", - priv->cfg_bssid); - memset(priv->cfg_bssid, 0, ETH_ALEN); - } - priv->assoc_request = 0; - priv->assoc_result = 0; - } - - if (priv->ibss_join_request == 1) { - if (!priv->ibss_join_result) { - cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, - GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: joined/created adhoc network with bssid" - " %pM successfully\n", priv->cfg_bssid); - } else { - dev_dbg(priv->adapter->dev, - "info: failed creating/joining adhoc network\n"); - } - priv->ibss_join_request = 0; - priv->ibss_join_result = 0; - } - - if (priv->disconnect) { - memset(priv->cfg_bssid, 0, ETH_ALEN); - priv->disconnect = 0; - } -} diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h index 8d010f2500c..76c76c60438 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.h +++ b/drivers/net/wireless/mwifiex/cfg80211.h @@ -26,5 +26,4 @@ int mwifiex_register_cfg80211(struct mwifiex_private *); -void mwifiex_cfg80211_results(struct work_struct *work); #endif diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index f2e6de03805..1782a77f15d 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ 
b/drivers/net/wireless/mwifiex/cfp.c @@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 }; * This function maps an index in supported rates table into * the corresponding data rate. */ -u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info) +u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, + u8 ht_info) { - u16 mcs_rate[4][8] = { - {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e} - , /* LG 40M */ - {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c} - , /* SG 40M */ - {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82} - , /* LG 20M */ - {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90} - }; /* SG 20M */ - + /* + * For every mcs_rate line, the first 8 bytes are for stream 1x1, + * and all 16 bytes are for stream 2x2. + */ + u16 mcs_rate[4][16] = { + /* LGI 40M */ + { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e, + 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c }, + + /* SGI 40M */ + { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c, + 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 }, + + /* LGI 20M */ + { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82, + 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 }, + + /* SGI 20M */ + { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90, + 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 } + }; + u32 mcs_num_supp = + (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8; u32 rate; if (ht_info & BIT(0)) { @@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info) rate = 0x0D; /* MCS 32 SGI rate */ else rate = 0x0C; /* MCS 32 LGI rate */ - } else if (index < 8) { + } else if (index < mcs_num_supp) { if (ht_info & BIT(1)) { if (ht_info & BIT(2)) /* SGI, 40M */ diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 0cc5d73cb0c..51c5417c569 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS { #define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f) #define SETHT_MCS32(x) (x[4] |= 1) +#define HT_STREAM_2X2 0x22 #define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4)) @@ -375,8 +376,6 @@ enum mwifiex_chan_scan_mode_bitmasks { MWIFIEX_DISABLE_CHAN_FILT = BIT(1), }; -#define SECOND_CHANNEL_BELOW 0x30 -#define SECOND_CHANNEL_ABOVE 0x10 struct mwifiex_chan_scan_param_set { u8 radio_type; u8 chan_number; @@ -673,7 +672,7 @@ struct host_cmd_ds_802_11_ad_hoc_start { union ieee_types_phy_param_set phy_param_set; u16 reserved1; __le16 cap_info_bitmap; - u8 DataRate[HOSTCMD_SUPPORTED_RATES]; + u8 data_rate[HOSTCMD_SUPPORTED_RATES]; } __packed; struct host_cmd_ds_802_11_ad_hoc_result { diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index d792b3fb7c1..e05b417a3fa 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -187,8 +187,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL; skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm)); - sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) - (adapter->sleep_cfm->data); adapter->cmd_sent = false; @@ -248,12 +246,14 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) memset(adapter->event_body, 0, sizeof(adapter->event_body)); adapter->hw_dot_11n_dev_cap = 0; adapter->hw_dev_mcs_support = 0; - adapter->chan_offset = 0; + adapter->sec_chan_offset = 0; adapter->adhoc_11n_enabled = false; mwifiex_wmm_init(adapter); if 
(adapter->sleep_cfm) { + sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) + adapter->sleep_cfm->data; memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len); sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH); @@ -283,6 +283,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) } /* + * This function sets trans_start per tx_queue + */ +void mwifiex_set_trans_start(struct net_device *dev) +{ + int i; + + for (i = 0; i < dev->num_tx_queues; i++) + netdev_get_tx_queue(dev, i)->trans_start = jiffies; + + dev->trans_start = jiffies; +} + +/* + * This function wakes up all queues in net_device + */ +void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, + struct mwifiex_adapter *adapter) +{ + unsigned long dev_queue_flags; + + spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); + netif_tx_wake_all_queues(netdev); + spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); +} + +/* + * This function stops all queues in net_device + */ +void mwifiex_stop_net_dev_queue(struct net_device *netdev, + struct mwifiex_adapter *adapter) +{ + unsigned long dev_queue_flags; + + spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); + netif_tx_stop_all_queues(netdev); + spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); +} + +/* * This function releases the lock variables and frees the locks and * associated locks. */ @@ -359,6 +398,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&adapter->int_lock); spin_lock_init(&adapter->main_proc_lock); spin_lock_init(&adapter->mwifiex_cmd_lock); + spin_lock_init(&adapter->queue_lock); for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { priv = adapter->priv[i]; diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index e0b68e7c8ca..d5d81f1fe41 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -62,17 +62,6 @@ enum { BAND_AN = 16, }; -#define NO_SEC_CHANNEL 0 -#define SEC_CHANNEL_ABOVE 1 -#define SEC_CHANNEL_BELOW 3 - -struct mwifiex_ds_band_cfg { - u32 config_bands; - u32 adhoc_start_band; - u32 adhoc_channel; - u32 sec_chan_offset; -}; - enum { ADHOC_IDLE, ADHOC_STARTED, diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 62b4c293860..0b0eb5efba9 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, u32 cmd_append_size = 0; u32 i; u16 tmp_cap; - uint16_t ht_cap_info; struct mwifiex_ie_types_chan_list_param_set *chan_tlv; + u8 radio_type; struct mwifiex_ie_types_htcap *ht_cap; struct mwifiex_ie_types_htinfo *ht_info; @@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; } - memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate)); - mwifiex_get_active_data_rates(priv, adhoc_start->DataRate); + memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate)); + mwifiex_get_active_data_rates(priv, adhoc_start->data_rate); if ((adapter->adhoc_start_band & BAND_G) && (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) { if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, @@ -850,20 +850,19 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, } } /* Find the last non zero */ - for (i = 0; i < sizeof(adhoc_start->DataRate) && - adhoc_start->DataRate[i]; - i++) - ; + for (i = 0; i < 
sizeof(adhoc_start->data_rate); i++) + if (!adhoc_start->data_rate[i]) + break; priv->curr_bss_params.num_of_rates = i; /* Copy the ad-hoc creating rates into Current BSS rate structure */ memcpy(&priv->curr_bss_params.data_rates, - &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates); + &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n", - adhoc_start->DataRate[0], adhoc_start->DataRate[1], - adhoc_start->DataRate[2], adhoc_start->DataRate[3]); + adhoc_start->data_rate[0], adhoc_start->data_rate[1], + adhoc_start->data_rate[2], adhoc_start->data_rate[3]); dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); @@ -886,12 +885,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, = mwifiex_band_to_radio_type(priv->curr_bss_params.band); if (adapter->adhoc_start_band & BAND_GN || adapter->adhoc_start_band & BAND_AN) { - if (adapter->chan_offset == SEC_CHANNEL_ABOVE) + if (adapter->sec_chan_offset == + IEEE80211_HT_PARAM_CHA_SEC_ABOVE) chan_tlv->chan_scan_param[0].radio_type |= - SECOND_CHANNEL_ABOVE; - else if (adapter->chan_offset == SEC_CHANNEL_BELOW) + (IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4); + else if (adapter->sec_chan_offset == + IEEE80211_HT_PARAM_CHA_SEC_ABOVE) chan_tlv->chan_scan_param[0].radio_type |= - SECOND_CHANNEL_BELOW; + (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4); } dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n", chan_tlv->chan_scan_param[0].radio_type); @@ -914,55 +915,40 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, } if (adapter->adhoc_11n_enabled) { - { - ht_cap = (struct mwifiex_ie_types_htcap *) pos; - memset(ht_cap, 0, - sizeof(struct mwifiex_ie_types_htcap)); - ht_cap->header.type = - cpu_to_le16(WLAN_EID_HT_CAPABILITY); - ht_cap->header.len = - cpu_to_le16(sizeof(struct ieee80211_ht_cap)); - ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info); - - ht_cap_info |= IEEE80211_HT_CAP_SGI_20; - if (adapter->chan_offset) { - ht_cap_info |= IEEE80211_HT_CAP_SGI_40; - ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40; - ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask); - } + /* Fill HT CAPABILITY */ + ht_cap = (struct mwifiex_ie_types_htcap *) pos; + memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap)); + ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY); + ht_cap->header.len = + cpu_to_le16(sizeof(struct ieee80211_ht_cap)); + radio_type = mwifiex_band_to_radio_type( + priv->adapter->config_bands); + mwifiex_fill_cap_info(priv, radio_type, ht_cap); + + pos += sizeof(struct mwifiex_ie_types_htcap); + cmd_append_size += + sizeof(struct mwifiex_ie_types_htcap); - ht_cap->ht_cap.ampdu_params_info - = IEEE80211_HT_MAX_AMPDU_64K; - ht_cap->ht_cap.mcs.rx_mask[0] = 0xff; - pos += sizeof(struct mwifiex_ie_types_htcap); - cmd_append_size += - sizeof(struct mwifiex_ie_types_htcap); - } - { - ht_info = (struct mwifiex_ie_types_htinfo *) pos; - memset(ht_info, 0, - sizeof(struct mwifiex_ie_types_htinfo)); - ht_info->header.type = - cpu_to_le16(WLAN_EID_HT_INFORMATION); - ht_info->header.len = - cpu_to_le16(sizeof(struct ieee80211_ht_info)); - ht_info->ht_info.control_chan = - (u8) priv->curr_bss_params.bss_descriptor. 
- channel; - if (adapter->chan_offset) { - ht_info->ht_info.ht_param = - adapter->chan_offset; - ht_info->ht_info.ht_param |= + /* Fill HT INFORMATION */ + ht_info = (struct mwifiex_ie_types_htinfo *) pos; + memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo)); + ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION); + ht_info->header.len = + cpu_to_le16(sizeof(struct ieee80211_ht_info)); + + ht_info->ht_info.control_chan = + (u8) priv->curr_bss_params.bss_descriptor.channel; + if (adapter->sec_chan_offset) { + ht_info->ht_info.ht_param = adapter->sec_chan_offset; + ht_info->ht_info.ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; - } - ht_info->ht_info.operation_mode = - cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - ht_info->ht_info.basic_set[0] = 0xff; - pos += sizeof(struct mwifiex_ie_types_htinfo); - cmd_append_size += - sizeof(struct mwifiex_ie_types_htinfo); } + ht_info->ht_info.operation_mode = + cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + ht_info->ht_info.basic_set[0] = 0xff; + pos += sizeof(struct mwifiex_ie_types_htinfo); + cmd_append_size += + sizeof(struct mwifiex_ie_types_htinfo); } cmd->size = cpu_to_le16((u16) diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 67e6db7d672..84be196188c 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb) static int mwifiex_open(struct net_device *dev) { - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; } @@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) atomic_inc(&priv->adapter->tx_pending); if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) { - netif_stop_queue(priv->netdev); - dev->trans_start = jiffies; + mwifiex_set_trans_start(dev); + mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter); } queue_work(priv->adapter->workqueue, &priv->adapter->main_work); @@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev) dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n", jiffies, priv->bss_index); - dev->trans_start = jiffies; + mwifiex_set_trans_start(dev); priv->num_tx_timeout++; } @@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, priv->media_connected = false; memset(&priv->nick_name, 0, sizeof(priv->nick_name)); priv->num_tx_timeout = 0; - priv->workqueue = create_singlethread_workqueue("cfg80211_wq"); - INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results); memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); } @@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) priv = adapter->priv[i]; if (priv && priv->netdev) { if (!netif_queue_stopped(priv->netdev)) - netif_stop_queue(priv->netdev); + mwifiex_stop_net_dev_queue(priv->netdev, + adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); } diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 30f138b6fa4..3186aa437f4 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -453,15 +453,8 @@ struct mwifiex_private { u8 scan_pending_on_block; u8 report_scan_result; struct cfg80211_scan_request *scan_request; - int scan_result_status; - int assoc_request; - u16 assoc_result; - int ibss_join_request; - u16 ibss_join_result; - bool disconnect; + struct mwifiex_user_scan_cfg *user_scan_cfg; u8 cfg_bssid[6]; - struct workqueue_struct *workqueue; - struct work_struct 
cfg_workqueue; u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; struct wps wps; u8 scan_block; @@ -647,7 +640,8 @@ struct mwifiex_adapter { u32 hw_dot_11n_dev_cap; u8 hw_dev_mcs_support; u8 adhoc_11n_enabled; - u8 chan_offset; + u8 sec_chan_offset; + enum nl80211_channel_type channel_type; struct mwifiex_dbg dbg; u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE]; u32 arp_filter_size; @@ -655,10 +649,19 @@ struct mwifiex_adapter { struct mwifiex_wait_queue cmd_wait_q; u8 scan_wait_q_woken; struct cmd_ctrl_node *cmd_queued; + spinlock_t queue_lock; /* lock for tx queues */ }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); +void mwifiex_set_trans_start(struct net_device *dev); + +void mwifiex_stop_net_dev_queue(struct net_device *netdev, + struct mwifiex_adapter *adapter); + +void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, + struct mwifiex_adapter *adapter); + int mwifiex_init_fw(struct mwifiex_adapter *adapter); int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); @@ -775,7 +778,8 @@ struct mwifiex_chan_freq_power * struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211( struct mwifiex_private *priv, u8 band, u32 freq); -u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info); +u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, + u8 ht_info); u32 mwifiex_find_freq_from_band_chan(u8, u8); int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, u8 **buffer); @@ -951,8 +955,6 @@ int mwifiex_main_process(struct mwifiex_adapter *); int mwifiex_bss_set_channel(struct mwifiex_private *, struct mwifiex_chan_freq_power *cfp); -int mwifiex_set_radio_band_cfg(struct mwifiex_private *, - struct mwifiex_ds_band_cfg *); int mwifiex_get_bss_info(struct mwifiex_private *, struct mwifiex_bss_info *); int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index d34acf082d3..405350940a4 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -386,8 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL); if (!card->txbd_ring_vbase) { dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n"); - kfree(card->txbd_ring_vbase); - return -1; + return -ENOMEM; } card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase); @@ -477,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) if (!card->rxbd_ring_vbase) { dev_err(adapter->dev, "Unable to allocate buffer for " "rxbd_ring.\n"); - return -1; + return -ENOMEM; } card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase); @@ -570,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) if (!card->evtbd_ring_vbase) { dev_err(adapter->dev, "Unable to allocate buffer. 
" "Terminating download\n"); - return -1; + return -ENOMEM; } card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase); @@ -1229,15 +1228,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, if (!skb) return 0; - if (rdptr >= MWIFIEX_MAX_EVT_BD) + if (rdptr >= MWIFIEX_MAX_EVT_BD) { dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n", rdptr); + return -EINVAL; + } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) { dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n"); - ret = -1; - goto done; + return -1; } if (!card->evt_buf_list[rdptr]) { @@ -1266,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, /* Write the event ring read pointer in to REG_EVTBD_RDPTR */ if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) { dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n"); - ret = -1; - goto done; + return -1; } -done: - /* Free the buffer for failure case */ - if (ret && skb) - dev_kfree_skb_any(skb); - dev_dbg(adapter->dev, "info: Check Events Again\n"); ret = mwifiex_pcie_process_event_ready(adapter); @@ -1672,9 +1666,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { - if (!adapter || !skb) { - dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n", - __func__, adapter, skb); + if (!skb) { + dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__); return -1; } diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 8d3ab378662..6396d3318ea 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -500,7 +500,6 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, struct ieee80211_channel *ch; struct mwifiex_adapter *adapter = priv->adapter; int chan_idx = 0, i; - u8 scan_type; for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) { @@ -514,19 +513,20 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, if (ch->flags & IEEE80211_CHAN_DISABLED) continue; scan_chan_list[chan_idx].radio_type = band; - scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN; + if (user_scan_in && user_scan_in->chan_list[0].scan_time) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16((u16) user_scan_in-> chan_list[0].scan_time); - else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) + else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->passive_scan_time); else scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->active_scan_time); - if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) + + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_PASSIVE_SCAN; else @@ -1391,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv, { int status; - priv->adapter->scan_wait_q_woken = false; - status = mwifiex_scan_networks(priv, scan_req); - if (!status) - status = mwifiex_wait_queue_complete(priv->adapter); + queue_work(priv->adapter->workqueue, &priv->adapter->main_work); return status; } @@ -1537,11 +1534,6 @@ done: return 0; } -static void mwifiex_free_bss_priv(struct cfg80211_bss *bss) -{ - kfree(bss->priv); -} - /* * This function handles the command response of scan. 
* @@ -1767,7 +1759,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, cap_info_bitmap, beacon_period, ie_buf, ie_len, rssi, GFP_KERNEL); *(u8 *)bss->priv = band; - bss->free_priv = mwifiex_free_bss_priv; + cfg80211_put_bss(bss); if (priv->media_connected && !memcmp(bssid, priv->curr_bss_params.bss_descriptor @@ -1801,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, up(&priv->async_sem); } + if (priv->user_scan_cfg) { + dev_dbg(priv->adapter->dev, "info: %s: sending scan " + "results\n", __func__); + cfg80211_scan_done(priv->scan_request, 0); + priv->scan_request = NULL; + kfree(priv->user_scan_cfg); + priv->user_scan_cfg = NULL; + } } else { /* Get scan command from scan_pending_q and put to cmd_pending_q */ diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 283171bbced..d39d8457f25 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev) /* Device ID for SD8787 */ #define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) +/* Device ID for SD8797 */ +#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129) /* WLAN IDs */ static const struct sdio_device_id mwifiex_ids[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, {}, }; @@ -1084,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, (adapter->ioport | 0x1000 | (card->mpa_rx.ports << 4)) + card->mpa_rx.start_port, 1)) - return -1; + goto error; curr_ptr = card->mpa_rx.buf; @@ -1127,12 +1130,29 @@ rx_curr_single: if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data, skb->len, adapter->ioport + port)) - return -1; + goto error; mwifiex_decode_rx_packet(adapter, skb, pkt_type); } return 0; + +error: + if (MP_RX_AGGR_IN_PROGRESS(card)) { + /* Multiport-aggregation transfer failed - cleanup */ + for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) { + /* copy pkt to deaggr buf */ + skb_deaggr = card->mpa_rx.skb_arr[pind]; + dev_kfree_skb_any(skb_deaggr); + } + MP_RX_AGGR_BUF_RESET(card); + } + + if (f_do_rx_cur) + /* Single transfer pending. 
Free curr buff also */ + dev_kfree_skb_any(skb); + + return -1; } /* @@ -1268,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr); - dev_kfree_skb_any(skb); return -1; } } @@ -1573,7 +1592,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) sdio_set_drvdata(func, card); adapter->dev = &func->dev; - strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); + + switch (func->device) { + case SDIO_DEVICE_ID_MARVELL_8797: + strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME); + break; + case SDIO_DEVICE_ID_MARVELL_8787: + default: + strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); + break; + } return 0; @@ -1630,14 +1658,14 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) card->mpa_tx.pkt_cnt = 0; card->mpa_tx.start_port = 0; - card->mpa_tx.enabled = 0; + card->mpa_tx.enabled = 1; card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; card->mpa_rx.buf_len = 0; card->mpa_rx.pkt_cnt = 0; card->mpa_rx.start_port = 0; - card->mpa_rx.enabled = 0; + card->mpa_rx.enabled = 1; card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; /* Allocate buffers for SDIO MP-A */ @@ -1774,4 +1802,5 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); MODULE_VERSION(SDIO_VERSION); MODULE_LICENSE("GPL v2"); -MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); +MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 3f711801e58..a3fb322205b 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -29,6 +29,7 @@ #include "main.h" #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" +#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index ea6518d1c9e..6e443ffa046 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -748,7 +748,7 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv, cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A); rf_type = le16_to_cpu(rf_chan->rf_type); - SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset); + SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset); rf_chan->current_channel = cpu_to_le16(*channel); } rf_chan->action = cpu_to_le16(cmd_action); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 7a16b0c417a..e812db8b695 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv, priv->tx_htinfo = resp->params.tx_rate.ht_info; if (!priv->is_data_rate_auto) priv->data_rate = - mwifiex_index_to_data_rate(priv->tx_rate, + mwifiex_index_to_data_rate(priv, priv->tx_rate, priv->tx_htinfo); return 0; diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index f204810e833..d7aa21da84d 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv) if (adapter->num_cmd_timeout && adapter->curr_cmd) return; priv->media_connected = false; - if (!priv->disconnect) { - priv->disconnect = 1; - dev_dbg(adapter->dev, "info: 
successfully disconnected from" - " %pM: reason code %d\n", priv->cfg_bssid, - WLAN_REASON_DEAUTH_LEAVING); - cfg80211_disconnected(priv->netdev, - WLAN_REASON_DEAUTH_LEAVING, NULL, 0, - GFP_KERNEL); - queue_work(priv->workqueue, &priv->cfg_workqueue); + dev_dbg(adapter->dev, "info: successfully disconnected from" + " %pM: reason code %d\n", priv->cfg_bssid, + WLAN_REASON_DEAUTH_LEAVING); + if (priv->bss_mode == NL80211_IFTYPE_STATION) { + cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING, + NULL, 0, GFP_KERNEL); } + memset(priv->cfg_bssid, 0, ETH_ALEN); + if (!netif_queue_stopped(priv->netdev)) - netif_stop_queue(priv->netdev); + mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); /* Reset wireless stats signal info */ @@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); if (netif_queue_stopped(priv->netdev)) - netif_wake_queue(priv->netdev); + mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); break; case EVENT_DEAUTHENTICATED: @@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) priv->adhoc_is_link_sensed = false; mwifiex_clean_txrx(priv); if (!netif_queue_stopped(priv->netdev)) - netif_stop_queue(priv->netdev); + mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); break; diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 1679c2593b7..470ca75ec25 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -239,7 +239,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, "associating...\n"); if (!netif_queue_stopped(priv->netdev)) - netif_stop_queue(priv->netdev); + mwifiex_stop_net_dev_queue(priv->netdev, adapter); /* Clear any past association response stored for * application retrieval */ @@ -270,7 +270,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, ret = mwifiex_check_network_compatibility(priv, bss_desc); if (!netif_queue_stopped(priv->netdev)) - netif_stop_queue(priv->netdev); + mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (!ret) { dev_dbg(adapter->dev, "info: network found in scan" @@ -477,67 +477,6 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, } /* - * The function sets band configurations. - * - * it performs extra checks to make sure the Ad-Hoc - * band and channel are compatible. Otherwise it returns an error. 
- * - */ -int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv, - struct mwifiex_ds_band_cfg *radio_cfg) -{ - struct mwifiex_adapter *adapter = priv->adapter; - u8 infra_band, adhoc_band; - u32 adhoc_channel; - - infra_band = (u8) radio_cfg->config_bands; - adhoc_band = (u8) radio_cfg->adhoc_start_band; - adhoc_channel = radio_cfg->adhoc_channel; - - /* SET Infra band */ - if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands) - return -1; - - adapter->config_bands = infra_band; - - /* SET Ad-hoc Band */ - if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands) - return -1; - - if (adhoc_band) - adapter->adhoc_start_band = adhoc_band; - adapter->chan_offset = (u8) radio_cfg->sec_chan_offset; - /* - * If no adhoc_channel is supplied verify if the existing adhoc - * channel compiles with new adhoc_band - */ - if (!adhoc_channel) { - if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211 - (priv, adapter->adhoc_start_band, - priv->adhoc_channel)) { - /* Pass back the default channel */ - radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL; - if ((adapter->adhoc_start_band & BAND_A) - || (adapter->adhoc_start_band & BAND_AN)) - radio_cfg->adhoc_channel = - DEFAULT_AD_HOC_CHANNEL_A; - } - } else { /* Retrurn error if adhoc_band and - adhoc_channel combination is invalid */ - if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211 - (priv, adapter->adhoc_start_band, (u16) adhoc_channel)) - return -1; - priv->adhoc_channel = (u8) adhoc_channel; - } - if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN)) - adapter->adhoc_11n_enabled = true; - else - adapter->adhoc_11n_enabled = false; - - return 0; -} - -/* * The function disables auto deep sleep mode. */ int mwifiex_disable_auto_ds(struct mwifiex_private *priv) @@ -837,8 +776,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, if (!ret) { if (rate->is_rate_auto) - rate->rate = mwifiex_index_to_data_rate(priv->tx_rate, - priv->tx_htinfo); + rate->rate = mwifiex_index_to_data_rate(priv, + priv->tx_rate, priv->tx_htinfo); else rate->rate = priv->data_rate; } else { diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index 27430512f7c..5e1ef7e5da4 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -126,6 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, u16 rx_pkt_type; struct mwifiex_private *priv = adapter->priv[rx_info->bss_index]; + if (!priv) + return -1; + local_rx_pd = (struct rxpd *) (skb->data); rx_pkt_type = local_rx_pd->rx_pkt_type; @@ -189,12 +192,11 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, (u8) local_rx_pd->rx_pkt_type, skb); - if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { - if (priv && (ret == -1)) - priv->stats.rx_dropped++; - + if (ret || (rx_pkt_type == PKT_TYPE_BAR)) dev_kfree_skb_any(skb); - } + + if (ret) + priv->stats.rx_dropped++; return ret; } diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index a206f412875..d9274a1b77a 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, if (!priv) goto done; - priv->netdev->trans_start = jiffies; + mwifiex_set_trans_start(priv->netdev); if (!status) { priv->stats.tx_packets++; priv->stats.tx_bytes += skb->len; @@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) && (tpriv->media_connected)) 
{ if (netif_queue_stopped(tpriv->netdev)) - netif_wake_queue(tpriv->netdev); + mwifiex_wake_up_net_dev_queue(tpriv->netdev, + adapter); } } done: diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 995695c28d5..7becea3dec6 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -28,10 +28,10 @@ #define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" #define MWL8K_NAME KBUILD_MODNAME -#define MWL8K_VERSION "0.12" +#define MWL8K_VERSION "0.13" /* Module parameters */ -static unsigned ap_mode_default; +static bool ap_mode_default; module_param(ap_mode_default, bool, 0); MODULE_PARM_DESC(ap_mode_default, "Set to 1 to make ap mode the default instead of sta mode"); @@ -198,6 +198,7 @@ struct mwl8k_priv { /* firmware access */ struct mutex fw_mutex; struct task_struct *fw_mutex_owner; + struct task_struct *hw_restart_owner; int fw_mutex_depth; struct completion *hostcmd_wait; @@ -262,6 +263,10 @@ struct mwl8k_priv { */ struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES]; + /* To perform the task of reloading the firmware */ + struct work_struct fw_reload; + bool hw_restart_in_progress; + /* async firmware loading state */ unsigned fw_state; char *fw_pref; @@ -738,10 +743,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw) ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE); if (ready_code == MWL8K_FWAP_READY) { - priv->ap_fw = 1; + priv->ap_fw = true; break; } else if (ready_code == MWL8K_FWSTA_READY) { - priv->ap_fw = 0; + priv->ap_fw = false; break; } @@ -1498,6 +1503,18 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) might_sleep(); + /* Since fw restart is in progress, allow only the firmware + * commands from the restart code and block the other + * commands since they are going to fail in any case since + * the firmware has crashed + */ + if (priv->hw_restart_in_progress) { + if (priv->hw_restart_owner == current) + return 0; + else + return -EBUSY; + } + /* * The TX queues are stopped at this point, so this test * doesn't need to take ->tx_lock. @@ -1541,6 +1558,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n", MWL8K_TX_WAIT_TIMEOUT_MS); mwl8k_dump_tx_rings(hw); + priv->hw_restart_in_progress = true; + ieee80211_queue_work(hw, &priv->fw_reload); rc = -ETIMEDOUT; } @@ -2058,7 +2077,9 @@ static int mwl8k_fw_lock(struct ieee80211_hw *hw) rc = mwl8k_tx_wait_empty(hw); if (rc) { - ieee80211_wake_queues(hw); + if (!priv->hw_restart_in_progress) + ieee80211_wake_queues(hw); + mutex_unlock(&priv->fw_mutex); return rc; @@ -2077,7 +2098,9 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw) struct mwl8k_priv *priv = hw->priv; if (!--priv->fw_mutex_depth) { - ieee80211_wake_queues(hw); + if (!priv->hw_restart_in_progress) + ieee80211_wake_queues(hw); + priv->fw_mutex_owner = NULL; mutex_unlock(&priv->fw_mutex); } @@ -4398,7 +4421,8 @@ static void mwl8k_stop(struct ieee80211_hw *hw) struct mwl8k_priv *priv = hw->priv; int i; - mwl8k_cmd_radio_disable(hw); + if (!priv->hw_restart_in_progress) + mwl8k_cmd_radio_disable(hw); ieee80211_stop_queues(hw); @@ -4499,6 +4523,16 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, return 0; } +static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif) +{ + /* Has ieee80211_restart_hw re-added the removed interfaces? 
*/ + if (!priv->macids_used) + return; + + priv->macids_used &= ~(1 << vif->macid); + list_del(&vif->list); +} + static void mwl8k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -4510,8 +4544,54 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw, mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00"); - priv->macids_used &= ~(1 << mwl8k_vif->macid); - list_del(&mwl8k_vif->list); + mwl8k_remove_vif(priv, mwl8k_vif); +} + +static void mwl8k_hw_restart_work(struct work_struct *work) +{ + struct mwl8k_priv *priv = + container_of(work, struct mwl8k_priv, fw_reload); + struct ieee80211_hw *hw = priv->hw; + struct mwl8k_device_info *di; + int rc; + + /* If some command is waiting for a response, clear it */ + if (priv->hostcmd_wait != NULL) { + complete(priv->hostcmd_wait); + priv->hostcmd_wait = NULL; + } + + priv->hw_restart_owner = current; + di = priv->device_info; + mwl8k_fw_lock(hw); + + if (priv->ap_fw) + rc = mwl8k_reload_firmware(hw, di->fw_image_ap); + else + rc = mwl8k_reload_firmware(hw, di->fw_image_sta); + + if (rc) + goto fail; + + priv->hw_restart_owner = NULL; + priv->hw_restart_in_progress = false; + + /* + * This unlock will wake up the queues and + * also opens the command path for other + * commands + */ + mwl8k_fw_unlock(hw); + + ieee80211_restart_hw(hw); + + wiphy_err(hw->wiphy, "Firmware restarted successfully\n"); + + return; +fail: + mwl8k_fw_unlock(hw); + + wiphy_err(hw->wiphy, "Firmware restart failed\n"); } static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) @@ -5024,7 +5104,11 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) { rc = mwl8k_check_ba(hw, stream); - if (!rc) + /* If HW restart is in progress mwl8k_post_cmd will + * return -EBUSY. 
Avoid retrying mwl8k_check_ba in + * such cases + */ + if (!rc || rc == -EBUSY) break; /* * HW queues take time to be flushed, give them @@ -5044,14 +5128,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); break; case IEEE80211_AMPDU_TX_STOP: - if (stream == NULL) - break; - if (stream->state == AMPDU_STREAM_ACTIVE) { - spin_unlock(&priv->stream_lock); - mwl8k_destroy_ba(hw, stream); - spin_lock(&priv->stream_lock); + if (stream) { + if (stream->state == AMPDU_STREAM_ACTIVE) { + spin_unlock(&priv->stream_lock); + mwl8k_destroy_ba(hw, stream); + spin_lock(&priv->stream_lock); + } + mwl8k_remove_stream(hw, stream); } - mwl8k_remove_stream(hw, stream); ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -5263,12 +5347,15 @@ fail: mwl8k_release_firmware(priv); } +#define MAX_RESTART_ATTEMPTS 1 static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image, bool nowait) { struct mwl8k_priv *priv = hw->priv; int rc; + int count = MAX_RESTART_ATTEMPTS; +retry: /* Reset firmware and hardware */ mwl8k_hw_reset(priv); @@ -5290,6 +5377,16 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image, /* Reclaim memory once firmware is successfully loaded */ mwl8k_release_firmware(priv); + if (rc && count) { + /* FW did not start successfully; + * lets try one more time + */ + count--; + wiphy_err(hw->wiphy, "Trying to reload the firmware again\n"); + msleep(20); + goto retry; + } + return rc; } @@ -5365,7 +5462,14 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw) goto err_free_queues; } - memset(priv->ampdu, 0, sizeof(priv->ampdu)); + /* + * When hw restart is requested, + * mac80211 will take care of clearing + * the ampdu streams, so do not clear + * the ampdu state here + */ + if (!priv->hw_restart_in_progress) + memset(priv->ampdu, 0, sizeof(priv->ampdu)); /* * Temporarily enable interrupts. Initial firmware host @@ -5439,10 +5543,20 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image) { int i, rc = 0; struct mwl8k_priv *priv = hw->priv; + struct mwl8k_vif *vif, *tmp_vif; mwl8k_stop(hw); mwl8k_rxq_deinit(hw, 0); + /* + * All the existing interfaces are re-added by the ieee80211_reconfig; + * which means driver should remove existing interfaces before calling + * ieee80211_restart_hw + */ + if (priv->hw_restart_in_progress) + list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list) + mwl8k_remove_vif(priv, vif); + for (i = 0; i < mwl8k_tx_queues(priv); i++) mwl8k_txq_deinit(hw, i); @@ -5454,6 +5568,9 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image) if (rc) goto fail; + if (priv->hw_restart_in_progress) + return rc; + rc = mwl8k_start(hw); if (rc) goto fail; @@ -5517,13 +5634,15 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) INIT_LIST_HEAD(&priv->vif_list); /* Set default radio state and preamble */ - priv->radio_on = 0; - priv->radio_short_preamble = 0; + priv->radio_on = false; + priv->radio_short_preamble = false; /* Finalize join worker */ INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); /* Handle watchdog ba events */ INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events); + /* To reload the firmware if it crashes */ + INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work); /* TX reclaim and RX tasklets. 
*/ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw); @@ -5667,6 +5786,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = mwl8k_init_firmware(hw, priv->fw_pref, true); if (rc) goto err_stop_firmware; + + priv->hw_restart_in_progress = false; + return rc; err_stop_firmware: diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index b52acc4b408..9fb77d0319f 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644); MODULE_PARM_DESC(orinoco_debug, "Debug level"); #endif -static int suppress_linkstatus; /* = 0 */ +static bool suppress_linkstatus; /* = 0 */ module_param(suppress_linkstatus, bool, 0644); MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes"); diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index e99ca1c1e0d..96e39edfec7 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/orinoco/scan.c @@ -76,6 +76,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; + struct cfg80211_bss *cbss; u8 *ie; u8 ie_buf[46]; u64 timestamp; @@ -121,9 +122,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, beacon_interval = le16_to_cpu(bss->a.beacon_interv); signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level)); - cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, - capability, beacon_interval, ie_buf, ie_len, - signal, GFP_KERNEL); + cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, + capability, beacon_interval, ie_buf, ie_len, + signal, GFP_KERNEL); + cfg80211_put_bss(cbss); } void orinoco_add_extscan_result(struct orinoco_private *priv, @@ -132,6 +134,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; + struct cfg80211_bss *cbss; const u8 *ie; u64 timestamp; s32 signal; @@ -152,9 +155,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, ie = bss->data; signal = SIGNAL_TO_MBM(bss->level); - cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, - capability, beacon_interval, ie, ie_len, - signal, GFP_KERNEL); + cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, + capability, beacon_interval, ie, ie_len, + signal, GFP_KERNEL); + cfg80211_put_bss(cbss); } void orinoco_add_hostscan_results(struct orinoco_private *priv, diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index db4d9a02f26..af2ca1a9c7d 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -27,7 +27,7 @@ #include "p54.h" #include "lmac.h" -static int modparam_nohwcrypt; +static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 78d0d698855..7faed62c637 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -581,11 +581,7 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) struct p54s_priv *priv = dev->priv; unsigned long flags; - if (mutex_lock_interruptible(&priv->mutex)) { - /* FIXME: how to handle this error? 
*/ - return; - } - + mutex_lock(&priv->mutex); WARN_ON(priv->fw_state != FW_STATE_READY); p54spi_power_off(priv); @@ -704,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi) static struct spi_driver p54spi_driver = { .driver = { .name = "p54spi", - .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 6ed9c323e3c..42b97bc3847 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) skb_unlink(skb, &priv->tx_queue); p54_tx_qos_accounting_free(priv, skb); - dev_kfree_skb_any(skb); + ieee80211_free_txskb(dev, skb); } EXPORT_SYMBOL_GPL(p54_free_skb); @@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { - dev_kfree_skb_any(skb); + ieee80211_free_txskb(dev, skb); return; } diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index bc2ba80c47b..4e44b1af119 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr) return ret; } -/* Note: currently, use hostapd ioctl from the Host AP driver for WPA - * support. This is to be replaced with Linux wireless extensions once they - * get WPA support. */ - -/* Note II: please leave all this together as it will be easier to remove later, - * once wireless extensions add WPA support -mcgrof */ - -/* PRISM54_HOSTAPD ioctl() cmd: */ -enum { - PRISM2_SET_ENCRYPTION = 6, - PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12, - PRISM2_HOSTAPD_MLME = 13, - PRISM2_HOSTAPD_SCAN_REQ = 14, -}; - #define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 -#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25 -#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26 - -#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024 -#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \ - offsetof(struct prism2_hostapd_param, u.generic_elem.data) - -/* Maximum length for algorithm names (-1 for nul termination) - * used in ioctl() */ -#define HOSTAP_CRYPT_ALG_NAME_LEN 16 - -struct prism2_hostapd_param { - u32 cmd; - u8 sta_addr[ETH_ALEN]; - union { - struct { - u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN]; - u32 flags; - u32 err; - u8 idx; - u8 seq[8]; /* sequence counter (set: RX, get: TX) */ - u16 key_len; - u8 key[0]; - } crypt; - struct { - u8 len; - u8 data[0]; - } generic_elem; - struct { -#define MLME_STA_DEAUTH 0 -#define MLME_STA_DISASSOC 1 - u16 cmd; - u16 reason_code; - } mlme; - struct { - u8 ssid_len; - u8 ssid[32]; - } scan_req; - } u; -}; - - -static int -prism2_ioctl_set_encryption(struct net_device *dev, - struct prism2_hostapd_param *param, - int param_len) -{ - islpci_private *priv = netdev_priv(dev); - int rvalue = 0, force = 0; - int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; - union oid_res_t r; - - /* with the new API, it's impossible to get a NULL pointer. - * New version of iwconfig set the IW_ENCODE_NOKEY flag - * when no key is given, but older versions don't. 
*/ - - if (param->u.crypt.key_len > 0) { - /* we have a key to set */ - int index = param->u.crypt.idx; - int current_index; - struct obj_key key = { DOT11_PRIV_TKIP, 0, "" }; - - /* get the current key index */ - rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); - current_index = r.u; - /* Verify that the key is not marked as invalid */ - if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) { - key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ? - sizeof (param->u.crypt.key) : param->u.crypt.key_len; - memcpy(key.key, param->u.crypt.key, key.length); - if (key.length == 32) - /* we want WPA-PSK */ - key.type = DOT11_PRIV_TKIP; - if ((index < 0) || (index > 3)) - /* no index provided use the current one */ - index = current_index; - - /* now send the key to the card */ - rvalue |= - mgt_set_request(priv, DOT11_OID_DEFKEYX, index, - &key); - } - /* - * If a valid key is set, encryption should be enabled - * (user may turn it off later). - * This is also how "iwconfig ethX key on" works - */ - if ((index == current_index) && (key.length > 0)) - force = 1; - } else { - int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1; - if ((index >= 0) && (index <= 3)) { - /* we want to set the key index */ - rvalue |= - mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, - &index); - } else { - if (!(param->u.crypt.flags & IW_ENCODE_MODE)) { - /* we cannot do anything. Complain. */ - return -EINVAL; - } - } - } - /* now read the flags */ - if (param->u.crypt.flags & IW_ENCODE_DISABLED) { - /* Encoding disabled, - * authen = DOT11_AUTH_OS; - * invoke = 0; - * exunencrypt = 0; */ - } - if (param->u.crypt.flags & IW_ENCODE_OPEN) - /* Encode but accept non-encoded packets. No auth */ - invoke = 1; - if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) { - /* Refuse non-encoded packets. 
Auth */ - authen = DOT11_AUTH_BOTH; - invoke = 1; - exunencrypt = 1; - } - /* do the change if requested */ - if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) { - rvalue |= - mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); - rvalue |= - mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke); - rvalue |= - mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, - &exunencrypt); - } - return rvalue; -} - -static int -prism2_ioctl_set_generic_element(struct net_device *ndev, - struct prism2_hostapd_param *param, - int param_len) -{ - islpci_private *priv = netdev_priv(ndev); - int max_len, len, alen, ret=0; - struct obj_attachment *attach; - - len = param->u.generic_elem.len; - max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN; - if (max_len < 0 || max_len < len) - return -EINVAL; - - alen = sizeof(*attach) + len; - attach = kzalloc(alen, GFP_KERNEL); - if (attach == NULL) - return -ENOMEM; - -#define WLAN_FC_TYPE_MGMT 0 -#define WLAN_FC_STYPE_ASSOC_REQ 0 -#define WLAN_FC_STYPE_REASSOC_REQ 2 - - /* Note: endianness is covered by mgt_set_varlen */ - - attach->type = (WLAN_FC_TYPE_MGMT << 2) | - (WLAN_FC_STYPE_ASSOC_REQ << 4); - attach->id = -1; - attach->size = len; - memcpy(attach->data, param->u.generic_elem.data, len); - - ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len); - - if (ret == 0) { - attach->type = (WLAN_FC_TYPE_MGMT << 2) | - (WLAN_FC_STYPE_REASSOC_REQ << 4); - - ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len); - - if (ret == 0) - printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", - ndev->name); - } - - kfree(attach); - return ret; - -} - -static int -prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param) -{ - return -EOPNOTSUPP; -} - -static int -prism2_ioctl_scan_req(struct net_device *ndev, - struct prism2_hostapd_param *param) -{ - islpci_private *priv = netdev_priv(ndev); - struct iw_request_info info; - int i, rvalue; - struct obj_bsslist *bsslist; - u32 noise = 0; - char *extra = ""; - char *current_ev = "foo"; - union oid_res_t r; - - if (islpci_get_state(priv) < PRV_STATE_INIT) { - /* device is not ready, fail gently */ - return 0; - } - - /* first get the noise value. We will use it to report the link quality */ - rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r); - noise = r.u; - - /* Ask the device for a list of known bss. We can report at most - * IW_MAX_AP=64 to the range struct. But the device won't repport anything - * if you change the value of IWMAX_BSS=24. 
- */ - rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r); - bsslist = r.ptr; - - info.cmd = PRISM54_HOSTAPD; - info.flags = 0; - - /* ok now, scan the list and translate its info */ - for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++) - current_ev = prism54_translate_bss(ndev, &info, current_ev, - extra + IW_SCAN_MAX_DATA, - &(bsslist->bsslist[i]), - noise); - kfree(bsslist); - - return rvalue; -} - -static int -prism54_hostapd(struct net_device *ndev, struct iw_point *p) -{ - struct prism2_hostapd_param *param; - int ret = 0; - u32 uwrq; - - printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length); - if (p->length < sizeof(struct prism2_hostapd_param) || - p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) - return -EINVAL; - - param = memdup_user(p->pointer, p->length); - if (IS_ERR(param)) - return PTR_ERR(param); - - switch (param->cmd) { - case PRISM2_SET_ENCRYPTION: - printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n", - ndev->name); - ret = prism2_ioctl_set_encryption(ndev, param, p->length); - break; - case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT: - printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n", - ndev->name); - ret = prism2_ioctl_set_generic_element(ndev, param, - p->length); - break; - case PRISM2_HOSTAPD_MLME: - printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n", - ndev->name); - ret = prism2_ioctl_mlme(ndev, param); - break; - case PRISM2_HOSTAPD_SCAN_REQ: - printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n", - ndev->name); - ret = prism2_ioctl_scan_req(ndev, param); - break; - case PRISM54_SET_WPA: - printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n", - ndev->name); - uwrq = 1; - ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL); - break; - case PRISM54_DROP_UNENCRYPTED: - printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n", - ndev->name); -#if 0 - uwrq = 0x01; - mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq); - down_write(&priv->mib_sem); - mgt_commit(priv); - up_write(&priv->mib_sem); -#endif - /* Not necessary, as set_wpa does it, should we just do it here though? 
*/ - ret = 0; - break; - default: - printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n", - ndev->name); - ret = -EOPNOTSUPP; - break; - } - - if (ret == 0 && copy_to_user(p->pointer, param, p->length)) - ret = -EFAULT; - - kfree(param); - - return ret; -} static int prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, @@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = { .private_args = (struct iw_priv_args *) prism54_private_args, .get_wireless_stats = prism54_get_wireless_stats, }; - -/* For wpa_supplicant */ - -int -prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct iwreq *wrq = (struct iwreq *) rq; - int ret = -1; - switch (cmd) { - case PRISM54_HOSTAPD: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - ret = prism54_hostapd(ndev, &wrq->u.data); - return ret; - } - return -EOPNOTSUPP; -} diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h index bcfbfb9281d..a34bceb6e3c 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.h +++ b/drivers/net/wireless/prism54/isl_ioctl.h @@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv); int prism54_set_mac_address(struct net_device *, void *); -int prism54_ioctl(struct net_device *, struct ifreq *, int); - extern const struct iw_handler_def prism54_handler_def; #endif /* _ISL_IOCTL_H */ diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 5d0f61508a2..5970ff6f40c 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev) static void islpci_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static const struct ethtool_ops islpci_ethtool_ops = { @@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = { static const struct net_device_ops islpci_netdev_ops = { .ndo_open = islpci_open, .ndo_stop = islpci_close, - .ndo_do_ioctl = prism54_ioctl, .ndo_start_xmit = islpci_eth_transmit, .ndo_tx_timeout = islpci_eth_tx_timeout, .ndo_set_mac_address = prism54_set_mac_address, diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0021e494851..04fec1fa6e0 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs, unsigned int pkt_addr, int rx_len) { UCHAR buff[256]; - struct rx_msg *msg = (struct rx_msg *)buff; + struct ray_rx_msg *msg = (struct ray_rx_msg *) buff; del_timer(&local->timer); @@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs, unsigned int pkt_addr, int rx_len) { /* UCHAR buff[256]; - struct rx_msg *msg = (struct rx_msg *)buff; + struct ray_rx_msg *msg = (struct ray_rx_msg *) buff; */ pr_debug("Deauthentication frame received\n"); local->authentication_state = UNAUTHENTICATED; diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h index d7646f299bd..3c3b98b152c 100644 --- a/drivers/net/wireless/rayctl.h +++ b/drivers/net/wireless/rayctl.h @@ -566,9 +566,9 @@ struct phy_header { UCHAR hdr_3; UCHAR hdr_4; }; -struct rx_msg { +struct ray_rx_msg { struct mac_header mac; - UCHAR var[1]; + UCHAR var[0]; }; 
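The rayctl.h hunk just above renames struct rx_msg to ray_rx_msg and shrinks its trailing member from var[1] to var[0], the old GCC zero-length-array idiom for a variable-length payload that sits in the same buffer right after the fixed header. A minimal stand-alone sketch of that access pattern follows; the names (demo_hdr, demo_alloc) and the userspace malloc are illustrative only, not code from this patch.

/* Sketch: zero-length trailing array for header + in-line payload. */
#include <stdlib.h>
#include <string.h>

struct demo_hdr {
        unsigned short len;        /* length of the payload that follows */
        unsigned char payload[0];  /* zero-length trailing array (GNU/C90 idiom) */
};

static struct demo_hdr *demo_alloc(const void *data, unsigned short len)
{
        /* header and payload share one allocation, like the rx buffer above */
        struct demo_hdr *h = malloc(sizeof(*h) + len);
        if (!h)
                return NULL;
        h->len = len;
        memcpy(h->payload, data, len);
        return h;
}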
struct tx_msg { diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 0c13840a7de..3802c31fefc 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -244,6 +244,10 @@ enum ndis_80211_power_mode { NDIS_80211_POWER_MODE_FAST_PSP, }; +enum ndis_80211_pmkid_cand_list_flag_bits { + NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0) +}; + struct ndis_80211_auth_request { __le32 length; u8 bssid[6]; @@ -387,19 +391,17 @@ struct ndis_80211_capability { struct ndis_80211_bssid_info { u8 bssid[6]; u8 pmkid[16]; -}; +} __packed; struct ndis_80211_pmkid { __le32 length; __le32 bssid_info_count; struct ndis_80211_bssid_info bssid_info[0]; -}; +} __packed; /* * private data */ -#define NET_TYPE_11FB 0 - #define CAP_MODE_80211A 1 #define CAP_MODE_80211B 2 #define CAP_MODE_80211G 4 @@ -414,6 +416,7 @@ struct ndis_80211_pmkid { #define RNDIS_WLAN_ALG_TKIP (1<<1) #define RNDIS_WLAN_ALG_CCMP (1<<2) +#define RNDIS_WLAN_NUM_KEYS 4 #define RNDIS_WLAN_KEY_MGMT_NONE 0 #define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0) #define RNDIS_WLAN_KEY_MGMT_PSK (1<<1) @@ -516,7 +519,7 @@ struct rndis_wlan_private { /* encryption stuff */ int encr_tx_key_index; - struct rndis_wlan_encr_key encr_keys[4]; + struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS]; int wpa_version; u8 command_buffer[COMMAND_BUFFER_SIZE]; @@ -1346,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel) return ret; } +static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev, + u16 *beacon_interval) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ieee80211_channel *channel; + struct ndis_80211_conf config; + int len, ret; + + /* Get channel and beacon interval */ + len = sizeof(config); + ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); + netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", + __func__, ret); + if (ret < 0) + return NULL; + + channel = ieee80211_get_channel(priv->wdev.wiphy, + KHZ_TO_MHZ(le32_to_cpu(config.ds_config))); + if (!channel) + return NULL; + + if (beacon_interval) + *beacon_interval = le16_to_cpu(config.beacon_period); + return channel; +} + /* index must be 0 - N, as per NDIS */ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, int index) @@ -1535,6 +1564,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid) bool is_wpa; int ret; + if (index >= RNDIS_WLAN_NUM_KEYS) + return -ENOENT; + if (priv->encr_keys[index].len == 0) return 0; @@ -1972,11 +2004,12 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, return ret; } -static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev, - struct ndis_80211_bssid_ex *bssid) +static bool rndis_bss_info_update(struct usbnet *usbdev, + struct ndis_80211_bssid_ex *bssid) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; + struct cfg80211_bss *bss; s32 signal; u64 timestamp; u16 capability; @@ -2015,9 +2048,12 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev, capability = le16_to_cpu(fixed->capabilities); beacon_interval = le16_to_cpu(fixed->beacon_interval); - return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, + bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, timestamp, capability, beacon_interval, ie, ie_len, signal, GFP_KERNEL); + cfg80211_put_bss(bss); + + return (bss != NULL); } static struct ndis_80211_bssid_ex *next_bssid_list_item( @@ 
-2451,6 +2487,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index); + if (key_index >= RNDIS_WLAN_NUM_KEYS) + return -ENOENT; + priv->encr_tx_key_index = key_index; if (is_wpa_key(priv, key_index)) @@ -2639,12 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; - struct ndis_80211_conf config; struct ndis_80211_ssid ssid; + struct cfg80211_bss *bss; s32 signal; u64 timestamp; u16 capability; - u16 beacon_interval; + u16 beacon_interval = 0; __le32 rssi; u8 ie_buf[34]; int len, ret, ie_len; @@ -2669,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, } /* Get channel and beacon interval */ - len = sizeof(config); - ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); - netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", - __func__, ret); - if (ret >= 0) { - beacon_interval = le16_to_cpu(config.beacon_period); - channel = ieee80211_get_channel(priv->wdev.wiphy, - KHZ_TO_MHZ(le32_to_cpu(config.ds_config))); - if (!channel) { - netdev_warn(usbdev->net, "%s(): could not get channel." - "\n", __func__); - return; - } - } else { - netdev_warn(usbdev->net, "%s(): could not get configuration.\n", - __func__); + channel = get_current_channel(usbdev, &beacon_interval); + if (!channel) { + netdev_warn(usbdev->net, "%s(): could not get channel.\n", + __func__); return; } @@ -2714,9 +2741,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, bssid, (u32)timestamp, capability, beacon_interval, ie_len, ssid.essid, signal); - cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, + bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, timestamp, capability, beacon_interval, ie_buf, ie_len, signal, GFP_KERNEL); + cfg80211_put_bss(bss); } /* @@ -2828,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) req_ie_len, resp_ie, resp_ie_len, 0, GFP_KERNEL); else - cfg80211_roamed(usbdev->net, NULL, bssid, - req_ie, req_ie_len, + cfg80211_roamed(usbdev->net, + get_current_channel(usbdev, NULL), + bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, GFP_KERNEL); } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); @@ -2995,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev, for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) { struct ndis_80211_pmkid_candidate *cand = &cand_list->candidate_list[i]; + bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH); - netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n", - i, le32_to_cpu(cand->flags), cand->bssid); - -#if 0 - struct iw_pmkid_cand pcand; - union iwreq_data wrqu; + netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n", + i, le32_to_cpu(cand->flags), preauth, cand->bssid); - memset(&pcand, 0, sizeof(pcand)); - if (le32_to_cpu(cand->flags) & 0x01) - pcand.flags |= IW_PMKID_CAND_PREAUTH; - pcand.index = i; - memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN); - - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = sizeof(pcand); - wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu, - (u8 *)&pcand); -#endif + cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid, + preauth, GFP_ATOMIC); } } diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c 
b/drivers/net/wireless/rt2x00/rt2500usb.c index 53c5f878f61..de7d41f21a6 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -39,7 +39,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt; +static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 4778620347c..2571a2fa3d0 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -50,7 +50,7 @@ * RF2853 2.4G/5G 3T3R * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) - * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) + * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) * RF5370 2.4G 1T1R * RF5390 2.4G 1T1R */ @@ -66,7 +66,7 @@ #define RF2853 0x000a #define RF3320 0x000b #define RF3322 0x000c -#define RF3853 0x000d +#define RF3053 0x000d #define RF5370 0x5370 #define RF5390 0x5390 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1ba079dffb1..22a1a8fc6e0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PSPOLL, !(filter_flags & FIF_PSPOLL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, 1); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR, 0); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR, + !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CNTL, !(filter_flags & FIF_CONTROL)); rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg); @@ -1942,19 +1944,24 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2); } - if (rt2x00_rf(rt2x00dev, RF2020) || - rt2x00_rf(rt2x00dev, RF3020) || - rt2x00_rf(rt2x00dev, RF3021) || - rt2x00_rf(rt2x00dev, RF3022) || - rt2x00_rf(rt2x00dev, RF3320)) + switch (rt2x00dev->chip.rf) { + case RF2020: + case RF3020: + case RF3021: + case RF3022: + case RF3320: rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); - else if (rt2x00_rf(rt2x00dev, RF3052)) + break; + case RF3052: rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); - else if (rt2x00_rf(rt2x00dev, RF5370) || - rt2x00_rf(rt2x00dev, RF5390)) + break; + case RF5370: + case RF5390: rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); - else + break; + default: rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); + } /* * Change BBP settings @@ -3930,15 +3937,18 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); - if (!rt2x00_rt(rt2x00dev, RT2860) && - !rt2x00_rt(rt2x00dev, RT2872) && - !rt2x00_rt(rt2x00dev, RT2883) && - !rt2x00_rt(rt2x00dev, RT3070) && - !rt2x00_rt(rt2x00dev, RT3071) && - !rt2x00_rt(rt2x00dev, RT3090) && - !rt2x00_rt(rt2x00dev, RT3390) && - !rt2x00_rt(rt2x00dev, RT3572) && - !rt2x00_rt(rt2x00dev, RT5390)) { + switch (rt2x00dev->chip.rt) { + case RT2860: + case RT2872: + case RT2883: + case RT3070: + case RT3071: + case RT3090: + case RT3390: + case RT3572: + case RT5390: + break; + default: ERROR(rt2x00dev, 
"Invalid RT chipset detected.\n"); return -ENODEV; } @@ -4552,6 +4562,9 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx, survey->channel_time_ext_busy = busy_ext / 1000; } + if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) + survey->filled |= SURVEY_INFO_IN_USE; + return 0; } diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index da48c8ac27b..4941a1a2321 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -50,7 +50,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt = 0; +static bool modparam_nohwcrypt = false; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 377876315b8..f8eb49f5ac2 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -45,7 +45,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt; +static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); @@ -400,10 +400,10 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry, /* * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is * TXWI + 802.11 header + L2 pad + payload + pad, - * so need to decrease size of TXINFO and USB end pad. + * so need to decrease size of TXINFO. */ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, - entry->skb->len - TXINFO_DESC_SIZE - 4); + roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE); rt2x00_set_field32(&word, TXINFO_W0_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); @@ -421,37 +421,20 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry, skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; } -static void rt2800usb_write_tx_data(struct queue_entry *entry, - struct txentry_desc *txdesc) +/* + * TX data initialization + */ +static int rt2800usb_get_tx_data_len(struct queue_entry *entry) { - unsigned int len; - int err; - - rt2800_write_tx_data(entry, txdesc); - /* - * pad(1~3 bytes) is added after each 802.11 payload. - * USB end pad(4 bytes) is added at each USB bulk out packet end. + * pad(1~3 bytes) is needed after each 802.11 payload. + * USB end pad(4 bytes) is needed at each USB bulk out packet end. 
* TX frame format is : * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad | * |<------------- tx_pkt_len ------------->| */ - len = roundup(entry->skb->len, 4) + 4; - err = skb_padto(entry->skb, len); - if (unlikely(err)) { - WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n"); - return; - } - entry->skb->len = len; -} - -/* - * TX data initialization - */ -static int rt2800usb_get_tx_data_len(struct queue_entry *entry) -{ - return entry->skb->len; + return roundup(entry->skb->len, 4) + 4; } /* @@ -807,7 +790,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .flush_queue = rt2x00usb_flush_queue, .tx_dma_done = rt2800usb_tx_dma_done, .write_tx_desc = rt2800usb_write_tx_desc, - .write_tx_data = rt2800usb_write_tx_data, + .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, .clear_beacon = rt2800_clear_beacon, .get_tx_data_len = rt2800usb_get_tx_data_len, @@ -914,12 +897,14 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x050d, 0x8053) }, { USB_DEVICE(0x050d, 0x805c) }, { USB_DEVICE(0x050d, 0x815c) }, + { USB_DEVICE(0x050d, 0x825a) }, { USB_DEVICE(0x050d, 0x825b) }, { USB_DEVICE(0x050d, 0x935a) }, { USB_DEVICE(0x050d, 0x935b) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00e8) }, { USB_DEVICE(0x0411, 0x0158) }, + { USB_DEVICE(0x0411, 0x015d) }, { USB_DEVICE(0x0411, 0x016f) }, { USB_DEVICE(0x0411, 0x01a2) }, /* Corega */ @@ -934,6 +919,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x07d1, 0x3c0e) }, { USB_DEVICE(0x07d1, 0x3c0f) }, { USB_DEVICE(0x07d1, 0x3c11) }, + { USB_DEVICE(0x07d1, 0x3c13) }, + { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c16) }, /* Draytek */ { USB_DEVICE(0x07fa, 0x7712) }, @@ -943,6 +930,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x7392, 0x7711) }, { USB_DEVICE(0x7392, 0x7717) }, { USB_DEVICE(0x7392, 0x7718) }, + { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x1480) }, { USB_DEVICE(0x203d, 0x14a9) }, @@ -976,6 +964,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13b1, 0x0031) }, { USB_DEVICE(0x1737, 0x0070) }, { USB_DEVICE(0x1737, 0x0071) }, + { USB_DEVICE(0x1737, 0x0077) }, + { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0162) }, { USB_DEVICE(0x0789, 0x0163) }, @@ -999,9 +989,13 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871b) }, { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3071) }, + { USB_DEVICE(0x1b75, 0x3072) }, /* Para */ { USB_DEVICE(0x20b8, 0x8888) }, /* Pegatron */ + { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x000c) }, { USB_DEVICE(0x1d4d, 0x000e) }, { USB_DEVICE(0x1d4d, 0x0011) }, @@ -1054,7 +1048,9 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Sparklan */ { USB_DEVICE(0x15a9, 0x0006) }, /* Sweex */ + { USB_DEVICE(0x177f, 0x0153) }, { USB_DEVICE(0x177f, 0x0302) }, + { USB_DEVICE(0x177f, 0x0313) }, /* U-Media */ { USB_DEVICE(0x157e, 0x300e) }, { USB_DEVICE(0x157e, 0x3013) }, @@ -1138,27 +1134,24 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13d3, 0x3322) }, /* Belkin */ { USB_DEVICE(0x050d, 0x1003) }, - { USB_DEVICE(0x050d, 0x825a) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x012e) }, { USB_DEVICE(0x0411, 0x0148) }, { USB_DEVICE(0x0411, 0x0150) }, - { USB_DEVICE(0x0411, 0x015d) }, /* Corega */ { USB_DEVICE(0x07aa, 0x0041) }, { USB_DEVICE(0x07aa, 0x0042) }, { 
USB_DEVICE(0x18c5, 0x0008) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c0b) }, - { USB_DEVICE(0x07d1, 0x3c13) }, - { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c17) }, { USB_DEVICE(0x2001, 0x3c17) }, /* Edimax */ { USB_DEVICE(0x7392, 0x4085) }, - { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x14a1) }, + /* Fujitsu Stylistic 550 */ + { USB_DEVICE(0x1690, 0x0761) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0010) }, /* Gigabyte */ @@ -1170,20 +1163,13 @@ static struct usb_device_id rt2800usb_device_table[] = { /* LevelOne */ { USB_DEVICE(0x1740, 0x0605) }, { USB_DEVICE(0x1740, 0x0615) }, - /* Linksys */ - { USB_DEVICE(0x1737, 0x0077) }, - { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0168) }, { USB_DEVICE(0x0789, 0x0169) }, /* Motorola */ { USB_DEVICE(0x100d, 0x9032) }, - /* Ovislink */ - { USB_DEVICE(0x1b75, 0x3071) }, - { USB_DEVICE(0x1b75, 0x3072) }, /* Pegatron */ { USB_DEVICE(0x05a6, 0x0101) }, - { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x0010) }, /* Planex */ { USB_DEVICE(0x2019, 0x5201) }, @@ -1202,9 +1188,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x083a, 0xc522) }, { USB_DEVICE(0x083a, 0xd522) }, { USB_DEVICE(0x083a, 0xf511) }, - /* Sweex */ - { USB_DEVICE(0x177f, 0x0153) }, - { USB_DEVICE(0x177f, 0x0313) }, /* Zyxel */ { USB_DEVICE(0x0586, 0x341a) }, #endif diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 99ff12d0c29..b03b22c47b1 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -189,9 +189,9 @@ struct rt2x00_chip { #define RT3090 0x3090 /* 2.4GHz PCIe */ #define RT3390 0x3390 #define RT3572 0x3572 -#define RT3593 0x3593 /* PCIe */ +#define RT3593 0x3593 #define RT3883 0x3883 /* WSOC */ -#define RT5390 0x5390 /* 2.4GHz */ +#define RT5390 0x5390 /* 2.4GHz */ u16 rf; u16 rev; diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index edd317fa7c0..c3e1aa7c1a8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -831,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, if (spec->supported_rates & SUPPORT_RATE_OFDM) num_rates += 8; - channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); + channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL); if (!channels) return -ENOMEM; - rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL); + rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL); if (!rates) goto exit_free_channels; diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index bf0acff0780..ede3c58e678 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) exit_fail: rt2x00queue_pause_queue(queue); exit_free_skb: - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } EXPORT_SYMBOL_GPL(rt2x00mac_tx); diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 1e31050dafc..2eea3866504 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -298,12 +298,22 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data) return false; /* - * USB devices cannot blindly pass the skb->len as the - * length of the data to usb_fill_bulk_urb. 
Pass the skb - * to the driver to determine what the length should be. + * USB devices require certain padding at the end of each frame + * and urb. Those paddings are not included in skbs. Pass entry + * to the driver to determine what the overall length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); + status = skb_padto(entry->skb, length); + if (unlikely(status)) { + /* TODO: report something more appropriate than IO_FAILED. */ + WARNING(rt2x00dev, "TX SKB padding error, out of memory\n"); + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); + rt2x00lib_dmadone(entry); + + return false; + } + usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, length, diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index bf55b4a311e..e0c6d117429 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -41,7 +41,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt = 0; +static bool modparam_nohwcrypt = false; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index cfb19dbb0a6..1c69c737086 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -40,7 +40,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt; +static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index b4ce93436d2..8d6eb0f56c0 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) if (is_valid_ether_addr(rtlefuse->dev_addr)) { SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); } else { - u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; - get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1); - SET_IEEE80211_PERM_ADDR(hw, rtlmac); + u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; + get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1); + SET_IEEE80211_PERM_ADDR(hw, rtlmac1); } } @@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw) u8 valid = 0; /*set init state to on */ - rtlpriv->rfkill.rfkill_state = 1; + rtlpriv->rfkill.rfkill_state = true; wiphy_rfkill_set_hw_state(hw->wiphy, 0); radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid); @@ -448,12 +448,12 @@ int rtl_init_core(struct ieee80211_hw *hw) /* <4> locks */ mutex_init(&rtlpriv->locks.conf_mutex); + mutex_init(&rtlpriv->locks.ps_mutex); spin_lock_init(&rtlpriv->locks.ips_lock); spin_lock_init(&rtlpriv->locks.irq_th_lock); spin_lock_init(&rtlpriv->locks.h2c_lock); spin_lock_init(&rtlpriv->locks.rf_ps_lock); spin_lock_init(&rtlpriv->locks.rf_lock); - spin_lock_init(&rtlpriv->locks.lps_lock); spin_lock_init(&rtlpriv->locks.waitq_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h index 4ae905983d0..f66b5757f6b 100644 --- a/drivers/net/wireless/rtlwifi/base.h +++ b/drivers/net/wireless/rtlwifi/base.h @@ -76,7 +76,7 @@ enum ap_peer { SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) #define SET_80211_PS_POLL_AID(_hdr, _val) \ - (*(u16 *)((u8 *)(_hdr) + 
2) = le16_to_cpu(_val)) + (*(u16 *)((u8 *)(_hdr) + 2) = _val) #define SET_80211_PS_POLL_BSSID(_hdr, _val) \ memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN) #define SET_80211_PS_POLL_TA(_hdr, _val) \ diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index eb61061821e..39e0907a3c4 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) u8 init_aspm; ppsc->reg_rfps_level = 0; - ppsc->support_aspm = 0; + ppsc->support_aspm = false; /*Update PCI ASPM setting */ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; @@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) if (ieee80211_is_nullfunc(fc)) { if (ieee80211_has_pm(fc)) { rtlpriv->mac80211.offchan_delay = true; - rtlpriv->psc.state_inap = 1; + rtlpriv->psc.state_inap = true; } else { - rtlpriv->psc.state_inap = 0; + rtlpriv->psc.state_inap = false; } } @@ -610,7 +610,7 @@ tx_status_ok: if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { - tasklet_schedule(&rtlpriv->works.ips_leave_tasklet); + schedule_work(&rtlpriv->works.lps_leave_work); } } @@ -736,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { - tasklet_schedule(&rtlpriv->works.ips_leave_tasklet); + schedule_work(&rtlpriv->works.lps_leave_work); } dev_kfree_skb_any(skb); @@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) unsigned long flags; u32 inta = 0; u32 intb = 0; + irqreturn_t ret = IRQ_HANDLED; spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); @@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb); /*Shared IRQ or HW disappared */ - if (!inta || inta == 0xffff) + if (!inta || inta == 0xffff) { + ret = IRQ_NONE; goto done; + } /*<1> beacon related */ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) { @@ -890,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) if (rtlpriv->rtlhal.earlymode_enable) tasklet_schedule(&rtlpriv->works.irq_tasklet); - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - return IRQ_HANDLED; - done: spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - return IRQ_HANDLED; + return ret; } static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) @@ -903,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) _rtl_pci_tx_chk_waitq(hw); } -static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw) -{ - rtl_lps_leave(hw); -} - static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -945,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) return; } +static void rtl_lps_leave_work_callback(struct work_struct *work) +{ + struct rtl_works *rtlworks = + container_of(work, struct rtl_works, lps_leave_work); + struct ieee80211_hw *hw = rtlworks->hw; + + rtl_lps_leave(hw); +} + static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1006,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, (unsigned long)hw); - 
tasklet_init(&rtlpriv->works.ips_leave_tasklet, - (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet, - (unsigned long)hw); + INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback); } static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, @@ -1478,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw) synchronize_irq(rtlpci->pdev->irq); tasklet_kill(&rtlpriv->works.irq_tasklet); - tasklet_kill(&rtlpriv->works.ips_leave_tasklet); + cancel_work_sync(&rtlpriv->works.lps_leave_work); flush_workqueue(rtlpriv->works.rtl_wq); destroy_workqueue(rtlpriv->works.rtl_wq); @@ -1553,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) set_hal_stop(rtlhal); rtlpriv->cfg->ops->disable_interrupt(hw); - tasklet_kill(&rtlpriv->works.ips_leave_tasklet); + cancel_work_sync(&rtlpriv->works.lps_leave_work); spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); while (ppsc->rfchange_inprogress) { diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index 55c8e50f45f..130fdd99d57 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -237,11 +237,12 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; + unsigned long flags; if (mac->opmode != NL80211_IFTYPE_STATION) return; - spin_lock(&rtlpriv->locks.ips_lock); + spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; @@ -257,7 +258,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) } } - spin_unlock(&rtlpriv->locks.ips_lock); + spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); } /*for FW LPS*/ @@ -395,7 +396,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) if (mac->link_state != MAC80211_LINKED) return; - spin_lock_irq(&rtlpriv->locks.lps_lock); + mutex_lock(&rtlpriv->locks.ps_mutex); /* Idle for a while if we connect to AP a while ago. 
*/ if (mac->cnt_after_linked >= 2) { @@ -407,7 +408,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) } } - spin_unlock_irq(&rtlpriv->locks.lps_lock); + mutex_unlock(&rtlpriv->locks.ps_mutex); } /*Leave the leisure power save mode.*/ @@ -416,9 +417,8 @@ void rtl_lps_leave(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - unsigned long flags; - spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags); + mutex_lock(&rtlpriv->locks.ps_mutex); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { @@ -439,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) rtl_lps_set_psmode(hw, EACTIVE); } } - spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags); + mutex_unlock(&rtlpriv->locks.ps_mutex); } /* For sw LPS*/ @@ -540,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } - spin_lock_irq(&rtlpriv->locks.lps_lock); + mutex_lock(&rtlpriv->locks.ps_mutex); rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); - spin_unlock_irq(&rtlpriv->locks.lps_lock); + mutex_unlock(&rtlpriv->locks.ps_mutex); } void rtl_swlps_rfon_wq_callback(void *data) @@ -575,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) if (rtlpriv->link_info.busytraffic) return; - spin_lock_irq(&rtlpriv->locks.lps_lock); + mutex_lock(&rtlpriv->locks.ps_mutex); rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); - spin_unlock_irq(&rtlpriv->locks.lps_lock); + mutex_unlock(&rtlpriv->locks.ps_mutex); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 950c65a15b8..931d97979b0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -73,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } +static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer, + u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20; + u8 *bufferPtr = (u8 *) buffer; + u32 i, offset, blockCount, remainSize; + + blockCount = size / blockSize; + remainSize = size % blockSize; + + for (i = 0; i < blockCount; i++) { + offset = i * blockSize; + rtlpriv->io.writeN_sync(rtlpriv, + (FW_8192C_START_ADDRESS + offset), + (void *)(bufferPtr + offset), + blockSize); + } + + if (remainSize) { + offset = blockCount * blockSize; + rtlpriv->io.writeN_sync(rtlpriv, + (FW_8192C_START_ADDRESS + offset), + (void *)(bufferPtr + offset), + remainSize); + } +} + static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { @@ -81,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, u8 *bufferPtr = (u8 *) buffer; u32 *pu4BytePtr = (u32 *) buffer; u32 i, offset, blockCount, remainSize; + u32 data; + if (rtlpriv->io.writeN_sync) { + rtl_block_fw_writeN(hw, buffer, size); + return; + } blockCount = size / blockSize; remainSize = size % blockSize; + if (remainSize) { + /* the last word is < 4 bytes - pad it with zeros */ + for (i = 0; i < 4 - remainSize; i++) + *(bufferPtr + size + i) = 0; + blockCount++; + } for (i = 0; i < blockCount; i++) { offset = i * blockSize; + /* for big-endian platforms, the firmware data need to be byte + * swapped as it was read as a byte string and will be written + * as 32-bit dwords and byte swapped when written + */ 
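/*
 * (Illustration, not part of this patch: the le32_to_cpu() added below is the
 * usual pattern when a little-endian byte stream, here the firmware image, is
 * pushed out through a register-write helper that itself converts CPU order to
 * bus order. A reduced sketch, with a hypothetical reg_write32() helper:
 *
 *      const u8 *fw = image;                    // raw little-endian bytes
 *      u32 word = le32_to_cpu(*(__le32 *)fw);   // reinterpret as LE32, to CPU order
 *      reg_write32(FW_BASE, word);              // write path swaps back on the bus
 *
 * On little-endian hosts le32_to_cpu() is a no-op, so nothing changes there;
 * only big-endian platforms see different, now correct, bytes on the bus.)
 */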
+ data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i)); rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4BytePtr + i)); - } - - if (remainSize) { - offset = blockCount * blockSize; - bufferPtr += offset; - for (i = 0; i < remainSize; i++) { - rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(bufferPtr + i)); - } + data); } } @@ -227,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) u32 fwsize; enum version_8192c version = rtlhal->version; - pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); if (!rtlhal->pfirmware) return 1; + pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; pfwdata = (u8 *) rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -238,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Firmware Version(%d), Signature(%#x),Size(%d)\n", - pfwheader->version, pfwheader->signature, - (uint)sizeof(struct rtl92c_firmware_header))); + le16_to_cpu(pfwheader->version), + le16_to_cpu(pfwheader->signature), + (uint)sizeof(struct rtl92c_firmware_header))); pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); fwsize = fwsize - sizeof(struct rtl92c_firmware_header); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index 3d5823c1262..cec5a3a1cc5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h @@ -32,32 +32,32 @@ #define FW_8192C_SIZE 0x3000 #define FW_8192C_START_ADDRESS 0x1000 -#define FW_8192C_END_ADDRESS 0x3FFF +#define FW_8192C_END_ADDRESS 0x1FFF #define FW_8192C_PAGE_SIZE 4096 #define FW_8192C_POLLING_DELAY 5 #define FW_8192C_POLLING_TIMEOUT_COUNT 100 #define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\ - (_pfwhdr->signature&0xFFF0) == 0x88C0) + ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\ + (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0) struct rtl92c_firmware_header { - u16 signature; + __le16 signature; u8 category; u8 function; - u16 version; + __le16 version; u8 subversion; u8 rsvd1; u8 month; u8 date; u8 hour; u8 minute; - u16 ramcodeSize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; + __le16 ramcodeSize; + __le16 rsvd2; + __le32 svnindex; + __le32 rsvd3; + __le32 rsvd4; + __le32 rsvd5; }; enum rtl8192c_h2c_cmd { @@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index f2aa33dc4d7..89ef6982ce5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtl8192ce_bt_reg_init(hw); - rtlpriv->dm.dm_initialgain_enable = 1; + rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = 0; + rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c 
b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 814c05df51e..124cf633861 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) } RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"), hwinfo, HWSET_MAX_SIZE); - eeprom_id = *((u16 *)&hwinfo[0]); + eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0])); if (eeprom_id != RTL8190_EEPROM_ID) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); @@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) pr_info("MAC address: %pM\n", rtlefuse->dev_addr); _rtl92cu_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; + rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]); + rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, (" VID = 0x%02x PID = 0x%02x\n", rtlefuse->eeprom_vid, rtlefuse->eeprom_did)); rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; - rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; + rtlefuse->eeprom_version = + le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]); rtlefuse->txpwr_fromeprom = true; rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, @@ -2435,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset)); } if (actuallyset) { - ppsc->hwradiooff = 1; + ppsc->hwradiooff = true; if (e_rfpowerstate_toset == ERFON) { if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 060a06f4a88..9e0c8fcdf90 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) } } rtlhal->version = (enum version_8192c)chip_version; + pr_info("rtl8192cu: Chip version 0x%x\n", chip_version); switch (rtlhal->version) { case VERSION_NORMAL_TSMC_CHIP_92C_1T2R: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index c244f2f1b83..3527c7957b4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) const struct firmware *firmware; int err; - rtlpriv->dm.dm_initialgain_enable = 1; + rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = 0; + rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; rtlpriv->rtlhal.pfirmware = vmalloc(0x4000); @@ -275,6 +275,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)}, /****** 8188CU ********/ + /* RTL8188CTV */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)}, /* 8188CE-VAU USB minCard */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)}, /* 8188cu 1*1 dongle */ @@ -291,14 +293,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = { 
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, /* 8188RU in Alfa AWUS036NHR */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, + /* RTL8188CUS-VL */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)}, /* 8188 Combo for BC4 */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, /****** 8192CU ********/ - /* 8191cu 1*2 */ - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)}, /* 8192cu 2*2 */ - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)}, /* 8192CE-VAU USB minCard */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)}, @@ -309,13 +311,17 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ - {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ /* HP - Lite-On ,8188CUS Slim Combo */ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */ {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ + {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/ {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ + /*SW-WF02-AD15 -Abocom*/ + {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)}, {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ @@ -326,14 +332,36 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */ {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */ + /****** 8188 RU ********/ + /* Netcore */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)}, + + /****** 8188CUS Slim Solo********/ + {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/ + {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/ + {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/ + + /****** 8188CUS Slim Combo ********/ + {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/ + {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/ + {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/ + {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/ + /****** 8192CU ********/ + {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/ + {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ + {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ + {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ + {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ {RTL_USB_DEVICE(0x2001, 
0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ + {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ {} }; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index bc33b147f44..b3cc7b94999 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc) SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); for (index = 0; index < 16; index++) checksum = checksum ^ (*(ptr + index)); - SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); + SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum)); } void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c index 149493f4c25..7911c9c8708 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c @@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = 0; + rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; - rtlpriv->dm.useramask = 1; + rtlpriv->dm.useramask = true; /* dual mac */ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c index 92f49d522c5..78723cf5949 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c @@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) int err = 0; u16 earlyrxthreshold = 7; - rtlpriv->dm.dm_initialgain_enable = 1; + rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = 0; + rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpriv->dm.useramask = true; diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 54cb8a60514..e956fa71d04 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -34,13 +34,14 @@ #include "usb.h" #include "base.h" #include "ps.h" +#include "rtl8192c/fw_common.h" #define REALTEK_USB_VENQT_READ 0xC0 #define REALTEK_USB_VENQT_WRITE 0x40 #define REALTEK_USB_VENQT_CMD_REQ 0x05 #define REALTEK_USB_VENQT_CMD_IDX 0x00 -#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 +#define MAX_USBCTRL_VENDORREQ_TIMES 10 static void usbctrl_async_callback(struct urb *urb) { @@ -82,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); + /* data are already in little-endian order */ memcpy(buf, pdata, len); usb_fill_control_urb(urb, udev, pipe, (unsigned char *)dr, buf, len, @@ -100,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, unsigned int pipe; int status; u8 reqtype; + int vendorreq_times = 0; + static int count; pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ reqtype = REALTEK_USB_VENQT_READ; - status = usb_control_msg(udev, pipe, request, reqtype, value, index, - pdata, len, 0); /* max. 
timeout */ + do { + status = usb_control_msg(udev, pipe, request, reqtype, value, + index, pdata, len, 0); /*max. timeout*/ + if (status < 0) { + /* firmware download is checksumed, don't retry */ + if ((value >= FW_8192C_START_ADDRESS && + value <= FW_8192C_END_ADDRESS)) + break; + } else { + break; + } + } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES); - if (status < 0) + if (status < 0 && count++ < 4) pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n", - value, status, *(u32 *)pdata); + value, status, le32_to_cpu(*(u32 *)pdata)); return status; } @@ -129,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len) wvalue = (u16)addr; _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len); - ret = *data; + ret = le32_to_cpu(*data); kfree(data); return ret; } @@ -161,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val, u8 request; u16 wvalue; u16 index; - u32 data; + __le32 data; request = REALTEK_USB_VENQT_CMD_REQ; index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ wvalue = (u16)(addr&0x0000ffff); - data = val; + data = cpu_to_le32(val); _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data, len); } @@ -192,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val) _usb_write_async(to_usb_device(dev), addr, val, 4); } +static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, + u16 len) +{ + struct device *dev = rtlpriv->io.dev; + struct usb_device *udev = to_usb_device(dev); + u8 request = REALTEK_USB_VENQT_CMD_REQ; + u8 reqtype = REALTEK_USB_VENQT_WRITE; + u16 wvalue; + u16 index = REALTEK_USB_VENQT_CMD_IDX; + int pipe = usb_sndctrlpipe(udev, 0); /* write_out */ + u8 *buffer; + dma_addr_t dma_addr; + + wvalue = (u16)(addr&0x0000ffff); + buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr); + if (!buffer) + return; + memcpy(buffer, data, len); + usb_control_msg(udev, pipe, request, reqtype, wvalue, + index, buffer, len, 50); + + usb_free_coherent(udev, (size_t)len, buffer, dma_addr); +} + static void _rtl_usb_io_handler_init(struct device *dev, struct ieee80211_hw *hw) { @@ -205,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev, rtlpriv->io.read8_sync = _usb_read8_sync; rtlpriv->io.read16_sync = _usb_read16_sync; rtlpriv->io.read32_sync = _usb_read32_sync; + rtlpriv->io.writeN_sync = _usb_writeN_sync; } static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 713c7ddba8e..cdaf1429fa0 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -63,6 +63,7 @@ #define AC_MAX 4 #define QOS_QUEUE_NUM 4 #define RTL_MAC80211_NUM_QUEUE 5 +#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 @@ -943,8 +944,10 @@ struct rtl_io { unsigned long pci_base_addr; /*device I/O address */ void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); - void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val); - void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val); + void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); + void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); + void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf, + u16 len); u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); @@ -1485,7 
+1488,7 @@ struct rtl_intf_ops { struct rtl_mod_params { /* default: 0 = using hardware encryption */ - int sw_crypto; + bool sw_crypto; /* default: 0 = DBG_EMERG (0)*/ int debug; @@ -1541,6 +1544,7 @@ struct rtl_hal_cfg { struct rtl_locks { /* mutex */ struct mutex conf_mutex; + struct mutex ps_mutex; /*spin lock */ spinlock_t ips_lock; @@ -1548,7 +1552,6 @@ struct rtl_locks { spinlock_t h2c_lock; spinlock_t rf_ps_lock; spinlock_t rf_lock; - spinlock_t lps_lock; spinlock_t waitq_lock; /*Dual mac*/ @@ -1573,7 +1576,8 @@ struct rtl_works { /* For SW LPS */ struct delayed_work ps_work; struct delayed_work ps_rfon_wq; - struct tasklet_struct ips_leave_tasklet; + + struct work_struct lps_leave_work; }; struct rtl_debug { diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c index eaa5f955620..6248c354fc5 100644 --- a/drivers/net/wireless/wl1251/spi.c +++ b/drivers/net/wireless/wl1251/spi.c @@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi) static struct spi_driver wl1251_spi_driver = { .driver = { .name = DRIVER_NAME, - .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig index 3fe388b87c2..af08c8609c6 100644 --- a/drivers/net/wireless/wl12xx/Kconfig +++ b/drivers/net/wireless/wl12xx/Kconfig @@ -42,16 +42,6 @@ config WL12XX_SDIO If you choose to build a module, it'll be called wl12xx_sdio. Say N if unsure. -config WL12XX_SDIO_TEST - tristate "TI wl12xx SDIO testing support" - depends on WL12XX && MMC && WL12XX_SDIO - default n - ---help--- - This module adds support for the SDIO bus testing with the - TI wl12xx chipsets. You probably don't want this unless you are - testing a new hardware platform. Select this if you want to test the - SDIO bus which is connected to the wl12xx chip. 
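The rtlwifi hunks above convert lps_lock from a spinlock into ps_mutex and, in step with that, replace ips_leave_tasklet with lps_leave_work: a mutex may sleep, and sleeping is not allowed in tasklet (softirq) context, so the RX/TX interrupt path now schedules a work item and the LPS-leave logic runs in process context. A minimal sketch of that defer-to-workqueue pattern, using illustrative demo_* names rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_ps_mutex);

static void demo_leave_lps(struct work_struct *work)
{
        /* process context: taking a mutex (and sleeping) is allowed here */
        mutex_lock(&demo_ps_mutex);
        /* ... tell the firmware to leave power save ... */
        mutex_unlock(&demo_ps_mutex);
}

static DECLARE_WORK(demo_lps_leave_work, demo_leave_lps);

/* called from the interrupt/tasklet hot path, where sleeping is forbidden */
static void demo_rx_hot_path(void)
{
        schedule_work(&demo_lps_leave_work);  /* defer the sleeping work */
}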
- config WL12XX_PLATFORM_DATA bool depends on WL12XX_SDIO != n || WL1251_SDIO != n diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile index 621b3483ca2..fe67262ba19 100644 --- a/drivers/net/wireless/wl12xx/Makefile +++ b/drivers/net/wireless/wl12xx/Makefile @@ -3,14 +3,11 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ wl12xx_spi-objs = spi.o wl12xx_sdio-objs = sdio.o -wl12xx_sdio_test-objs = sdio_test.o wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o obj-$(CONFIG_WL12XX) += wl12xx.o obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o -obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o - # small builtin driver bit obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index ca044a74319..7537c401a44 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c @@ -29,11 +29,12 @@ #include <linux/slab.h> #include "wl12xx.h" +#include "debug.h" #include "wl12xx_80211.h" #include "reg.h" #include "ps.h" -int wl1271_acx_wake_up_conditions(struct wl1271 *wl) +int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_wake_up_condition *wake_up; int ret; @@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl) goto out; } - wake_up->role_id = wl->role_id; + wake_up->role_id = wlvif->role_id; wake_up->wake_up_event = wl->conf.conn.wake_up_event; wake_up->listen_interval = wl->conf.conn.listen_interval; @@ -84,7 +85,8 @@ out: return ret; } -int wl1271_acx_tx_power(struct wl1271 *wl, int power) +int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, + int power) { struct acx_current_tx_power *acx; int ret; @@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->current_tx_power = power * 10; ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); @@ -114,7 +116,7 @@ out: return ret; } -int wl1271_acx_feature_cfg(struct wl1271 *wl) +int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_feature_config *feature; int ret; @@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl) } /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ - feature->role_id = wl->role_id; + feature->role_id = wlvif->role_id; feature->data_flow_options = 0; feature->options = 0; @@ -184,33 +186,8 @@ out: return ret; } -int wl1271_acx_pd_threshold(struct wl1271 *wl) -{ - struct acx_packet_detection *pd; - int ret; - - wl1271_debug(DEBUG_ACX, "acx data pd threshold"); - - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - ret = -ENOMEM; - goto out; - } - - pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold); - - ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); - if (ret < 0) { - wl1271_warning("failed to set pd threshold: %d", ret); - goto out; - } - -out: - kfree(pd); - return ret; -} - -int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) +int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_slot_type slot_time) { struct acx_slot *slot; int ret; @@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) goto out; } - slot->role_id = wl->role_id; + slot->role_id = wlvif->role_id; slot->wone_index = STATION_WONE_INDEX; slot->slot_time = slot_time; @@ -238,8 +215,8 @@ out: return ret; } -int 
wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, - void *mc_list, u32 mc_list_len) +int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, void *mc_list, u32 mc_list_len) { struct acx_dot11_grp_addr_tbl *acx; int ret; @@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, } /* MAC filtering */ - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->enabled = enable; acx->num_groups = mc_list_len; memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); @@ -270,7 +247,8 @@ out: return ret; } -int wl1271_acx_service_period_timeout(struct wl1271 *wl) +int wl1271_acx_service_period_timeout(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct acx_rx_timeout *rx_timeout; int ret; @@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl) wl1271_debug(DEBUG_ACX, "acx service period timeout"); - rx_timeout->role_id = wl->role_id; + rx_timeout->role_id = wlvif->role_id; rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); @@ -300,7 +278,8 @@ out: return ret; } -int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold) +int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u32 rts_threshold) { struct acx_rts_threshold *rts; int ret; @@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold) goto out; } - rts->role_id = wl->role_id; + rts->role_id = wlvif->role_id; rts->threshold = cpu_to_le16((u16)rts_threshold); ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); @@ -363,7 +342,8 @@ out: return ret; } -int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable_filter) { struct acx_beacon_filter_option *beacon_filter = NULL; int ret = 0; @@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) goto out; } - beacon_filter->role_id = wl->role_id; + beacon_filter->role_id = wlvif->role_id; beacon_filter->enable = enable_filter; /* @@ -401,7 +381,8 @@ out: return ret; } -int wl1271_acx_beacon_filter_table(struct wl1271 *wl) +int wl1271_acx_beacon_filter_table(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct acx_beacon_filter_ie_table *ie_table; int i, idx = 0; @@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl) } /* configure default beacon pass-through rules */ - ie_table->role_id = wl->role_id; + ie_table->role_id = wlvif->role_id; ie_table->num_ie = 0; for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); @@ -458,7 +439,8 @@ out: #define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff -int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable) +int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) { struct acx_conn_monit_params *acx; u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE; @@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable) timeout = wl->conf.conn.bss_lose_timeout; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->synch_fail_thold = cpu_to_le32(threshold); acx->bss_lose_timeout = cpu_to_le32(timeout); @@ -582,7 +564,7 @@ out: return ret; } -int wl1271_acx_bcn_dtim_options(struct wl1271 *wl) +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_beacon_broadcast *bb; 
int ret; @@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl) goto out; } - bb->role_id = wl->role_id; + bb->role_id = wlvif->role_id; bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; @@ -612,7 +594,7 @@ out: return ret; } -int wl1271_acx_aid(struct wl1271 *wl, u16 aid) +int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid) { struct acx_aid *acx_aid; int ret; @@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid) goto out; } - acx_aid->role_id = wl->role_id; + acx_aid->role_id = wlvif->role_id; acx_aid->aid = cpu_to_le16(aid); ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); @@ -668,7 +650,8 @@ out: return ret; } -int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble) +int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_preamble_type preamble) { struct acx_preamble *acx; int ret; @@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->preamble = preamble; ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); @@ -695,7 +678,7 @@ out: return ret; } -int wl1271_acx_cts_protect(struct wl1271 *wl, +int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum acx_ctsprotect_type ctsprotect) { struct acx_ctsprotect *acx; @@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl, goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->ctsprotect = ctsprotect; ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); @@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) return 0; } -int wl1271_acx_sta_rate_policies(struct wl1271 *wl) +int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_rate_policy *acx; struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; @@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl) } wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x", - wl->basic_rate, wl->rate_set); + wlvif->basic_rate, wlvif->rate_set); /* configure one basic rate class */ - acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE); - acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate); + acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx); + acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; @@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl) } /* configure one AP supported rate class */ - acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE); - acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set); + acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); + acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; @@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl) * (p2p packets should always go out with OFDM rates, even * if we are currently connected to 11b AP) */ - acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P); + 
acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx); acx->rate_policy.enabled_rates = cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P); acx->rate_policy.short_retry_limit = c->short_retry_limit; @@ -839,8 +822,8 @@ out: return ret; } -int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, - u8 aifsn, u16 txop) +int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop) { struct acx_ac_cfg *acx; int ret = 0; @@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->ac = ac; acx->cw_min = cw_min; acx->cw_max = cpu_to_le16(cw_max); @@ -873,7 +856,8 @@ out: return ret; } -int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, +int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue_id, u8 channel_type, u8 tsid, u8 ps_scheme, u8 ack_policy, u32 apsd_conf0, u32 apsd_conf1) { @@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->queue_id = queue_id; acx->channel_type = channel_type; acx->tsid = tsid; @@ -1098,7 +1082,8 @@ out: return ret; } -int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) +int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) { struct wl1271_acx_bet_enable *acx = NULL; int ret = 0; @@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; acx->max_consecutive = wl->conf.conn.bet_max_consecutive; @@ -1129,7 +1114,8 @@ out: return ret; } -int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address) +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 enable, __be32 address) { struct wl1271_acx_arp_filter *acx; int ret; @@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->version = ACX_IPV4_VERSION; acx->enable = enable; @@ -1189,7 +1175,8 @@ out: return ret; } -int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable) +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) { struct wl1271_acx_keep_alive_mode *acx = NULL; int ret = 0; @@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->enabled = enable; ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx)); @@ -1216,7 +1203,8 @@ out: return ret; } -int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid) +int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 index, u8 tpl_valid) { struct wl1271_acx_keep_alive_config *acx = NULL; int ret = 0; @@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval); acx->index = index; acx->tpl_validation = tpl_valid; @@ -1247,8 +1235,8 @@ out: return ret; } -int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, - s16 thold, u8 hyst) +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct 
wl12xx_vif *wlvif, + bool enable, s16 thold, u8 hyst) { struct wl1271_acx_rssi_snr_trigger *acx = NULL; int ret = 0; @@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, goto out; } - wl->last_rssi_event = -1; + wlvif->last_rssi_event = -1; - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing); acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON; acx->type = WL1271_ACX_TRIG_TYPE_EDGE; @@ -1288,7 +1276,8 @@ out: return ret; } -int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl) +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct wl1271_acx_rssi_snr_avg_weights *acx = NULL; struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger; @@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl) goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->rssi_beacon = c->avg_weight_rssi_beacon; acx->rssi_data = c->avg_weight_rssi_data; acx->snr_beacon = c->avg_weight_snr_beacon; @@ -1367,6 +1356,7 @@ out: } int wl1271_acx_set_ht_information(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u16 ht_operation_mode) { struct wl1271_acx_ht_information *acx; @@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl, goto out; } - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->ht_protection = (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); acx->rifs_mode = 0; @@ -1402,7 +1392,8 @@ out: } /* Configure BA session initiator/receiver parameters setting in the FW. */ -int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl) +int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct wl1271_acx_ba_initiator_policy *acx; int ret; @@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl) } /* set for the current role */ - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap; acx->win_size = wl->conf.ht.tx_ba_win_size; acx->inactivity_timeout = wl->conf.ht.inactivity_timeout; @@ -1494,7 +1485,8 @@ out: return ret; } -int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable) +int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) { struct wl1271_acx_ps_rx_streaming *rx_streaming; u32 conf_queues, enable_queues; @@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable) if (!(conf_queues & BIT(i))) continue; - rx_streaming->role_id = wl->role_id; + rx_streaming->role_id = wlvif->role_id; rx_streaming->tid = i; rx_streaming->enable = enable_queues & BIT(i); rx_streaming->period = wl->conf.rx_streaming.interval; @@ -1542,7 +1534,7 @@ out: return ret; } -int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl) +int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_acx_ap_max_tx_retry *acx = NULL; int ret; @@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl) if (!acx) return -ENOMEM; - acx->role_id = wl->role_id; + acx->role_id = wlvif->role_id; acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries); ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); @@ -1567,7 +1559,7 @@ out: return ret; } -int wl1271_acx_config_ps(struct wl1271 *wl) +int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_acx_config_ps *config_ps; int ret; @@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl) 
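Every acx.c change above follows the same mechanical conversion: a command helper that used to pull interface state (role_id, rate sets, hlids) out of the global struct wl1271 now takes a struct wl12xx_vif and reads the per-interface copy, which is the groundwork for supporting several virtual interfaces on one device. A schematic sketch of the converted shape, assuming a hypothetical acx_example information element and ACX_EXAMPLE_ID; only wl1271_cmd_configure() and the role_id field come from the code above.

struct acx_example {
        struct acx_header header;
        u8 role_id;     /* firmware role the setting applies to */
        u8 enable;
        u8 pad[2];
} __packed;

static int wl12xx_acx_example(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                              bool enable)
{
        struct acx_example *acx;
        int ret;

        acx = kzalloc(sizeof(*acx), GFP_KERNEL);
        if (!acx)
                return -ENOMEM;

        /* per-interface state lives in wlvif, not in the global wl */
        acx->role_id = wlvif->role_id;
        acx->enable = enable;

        ret = wl1271_cmd_configure(wl, ACX_EXAMPLE_ID, acx, sizeof(*acx));
        kfree(acx);
        return ret;
}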
config_ps->exit_retries = wl->conf.conn.psm_exit_retries; config_ps->enter_retries = wl->conf.conn.psm_entry_retries; - config_ps->null_data_rate = cpu_to_le32(wl->basic_rate); + config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate); ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps, sizeof(*config_ps)); diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h index e3f93b4b342..69892b40c2d 100644 --- a/drivers/net/wireless/wl12xx/acx.h +++ b/drivers/net/wireless/wl12xx/acx.h @@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime { __le32 lifetime; } __packed; -struct acx_packet_detection { - struct acx_header header; - - __le32 threshold; -} __packed; - - enum acx_slot_type { SLOT_TIME_LONG = 0, SLOT_TIME_SHORT = 1, @@ -654,11 +647,6 @@ struct acx_rate_class { u8 reserved; }; -#define ACX_TX_BASIC_RATE 0 -#define ACX_TX_AP_FULL_RATE 1 -#define ACX_TX_BASIC_RATE_P2P 2 -#define ACX_TX_AP_MODE_MGMT_RATE 4 -#define ACX_TX_AP_MODE_BCST_RATE 5 struct acx_rate_policy { struct acx_header header; @@ -1234,39 +1222,48 @@ enum { }; -int wl1271_acx_wake_up_conditions(struct wl1271 *wl); +int wl1271_acx_wake_up_conditions(struct wl1271 *wl, + struct wl12xx_vif *wlvif); int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); -int wl1271_acx_tx_power(struct wl1271 *wl, int power); -int wl1271_acx_feature_cfg(struct wl1271 *wl); +int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, + int power); +int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map, size_t len); int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl); -int wl1271_acx_pd_threshold(struct wl1271 *wl); -int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); -int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, - void *mc_list, u32 mc_list_len); -int wl1271_acx_service_period_timeout(struct wl1271 *wl); -int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold); +int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_slot_type slot_time); +int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, void *mc_list, u32 mc_list_len); +int wl1271_acx_service_period_timeout(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u32 rts_threshold); int wl1271_acx_dco_itrim_params(struct wl1271 *wl); -int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); -int wl1271_acx_beacon_filter_table(struct wl1271 *wl); -int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable); +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable_filter); +int wl1271_acx_beacon_filter_table(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable); int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable); int wl12xx_acx_sg_cfg(struct wl1271 *wl); int wl1271_acx_cca_threshold(struct wl1271 *wl); -int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); -int wl1271_acx_aid(struct wl1271 *wl, u16 aid); +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid); int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); -int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); -int wl1271_acx_cts_protect(struct wl1271 
*wl, +int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_preamble_type preamble); +int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum acx_ctsprotect_type ctsprotect); int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); -int wl1271_acx_sta_rate_policies(struct wl1271 *wl); +int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, u8 idx); -int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, - u8 aifsn, u16 txop); -int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, +int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop); +int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue_id, u8 channel_type, u8 tsid, u8 ps_scheme, u8 ack_policy, u32 apsd_conf0, u32 apsd_conf1); int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold); @@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl); int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); int wl1271_acx_smart_reflex(struct wl1271 *wl); -int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); -int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address); +int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable); +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 enable, __be32 address); int wl1271_acx_pm_config(struct wl1271 *wl); -int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable); -int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid); -int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, - s16 thold, u8 hyst); -int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif, + bool enable); +int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 index, u8 tpl_valid); +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, s16 thold, u8 hyst); +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, + struct wl12xx_vif *wlvif); int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, struct ieee80211_sta_ht_cap *ht_cap, bool allow_ht_operation, u8 hlid); int wl1271_acx_set_ht_information(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u16 ht_operation_mode); -int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl); +int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, + struct wl12xx_vif *wlvif); int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, bool enable, u8 peer_hlid); int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); -int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable); -int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl); -int wl1271_acx_config_ps(struct wl1271 *wl); +int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable); +int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); int wl1271_acx_fm_coex(struct wl1271 *wl); int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/boot.c 
b/drivers/net/wireless/wl12xx/boot.c index 68133791497..8f9cf5a816e 100644 --- a/drivers/net/wireless/wl12xx/boot.c +++ b/drivers/net/wireless/wl12xx/boot.c @@ -25,6 +25,7 @@ #include <linux/wl12xx.h> #include <linux/export.h> +#include "debug.h" #include "acx.h" #include "reg.h" #include "boot.h" @@ -347,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 3; for (i = 0; i < burst_len; i++) { + if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); @@ -358,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 4; dest_addr += 4; } + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; } /* @@ -369,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) */ nvs_ptr = (u8 *)wl->nvs + ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + nvs_len -= nvs_ptr - (u8 *)wl->nvs; /* Now we must set the partition correctly */ @@ -384,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) kfree(nvs_aligned); return 0; + +out_badnvs: + wl1271_error("nvs data is malformed"); + return -EILSEQ; } static void wl1271_boot_enable_interrupts(struct wl1271 *wl) diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c index a52299e548f..25990bd38be 100644 --- a/drivers/net/wireless/wl12xx/cmd.c +++ b/drivers/net/wireless/wl12xx/cmd.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include "wl12xx.h" +#include "debug.h" #include "reg.h" #include "io.h" #include "acx.h" @@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from INI out of bounds"); + return -EINVAL; + } + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); @@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from ini out of bounds"); + return -EINVAL; + } + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? 
"auto" : "manual", gp->tx_bip_fem_manufacturer); @@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) return 0; } -int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id) +int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, + u8 *role_id) { struct wl12xx_cmd_role_enable *cmd; int ret; @@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id) goto out_free; } - memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN); + memcpy(cmd->mac_address, addr, ETH_ALEN); cmd->role_type = role_type; ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0); @@ -433,37 +457,41 @@ out: return ret; } -static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid) +int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) { u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS); if (link >= WL12XX_MAX_LINKS) return -EBUSY; __set_bit(link, wl->links_map); + __set_bit(link, wlvif->links_map); *hlid = link; return 0; } -static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid) +void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) { if (*hlid == WL12XX_INVALID_LINK_ID) return; __clear_bit(*hlid, wl->links_map); + __clear_bit(*hlid, wlvif->links_map); *hlid = WL12XX_INVALID_LINK_ID; } -static int wl12xx_get_new_session_id(struct wl1271 *wl) +static int wl12xx_get_new_session_id(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { - if (wl->session_counter >= SESSION_COUNTER_MAX) - wl->session_counter = 0; + if (wlvif->session_counter >= SESSION_COUNTER_MAX) + wlvif->session_counter = 0; - wl->session_counter++; + wlvif->session_counter++; - return wl->session_counter; + return wlvif->session_counter; } -int wl12xx_cmd_role_start_dev(struct wl1271 *wl) +static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct wl12xx_cmd_role_start *cmd; int ret; @@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id); + wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id); - cmd->role_id = wl->dev_role_id; - if (wl->band == IEEE80211_BAND_5GHZ) + cmd->role_id = wlvif->dev_role_id; + if (wlvif->band == IEEE80211_BAND_5GHZ) cmd->band = WL12XX_BAND_5GHZ; - cmd->channel = wl->channel; + cmd->channel = wlvif->channel; - if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) { - ret = wl12xx_allocate_link(wl, &wl->dev_hlid); + if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid); if (ret) goto out_free; } - cmd->device.hlid = wl->dev_hlid; - cmd->device.session = wl->session_counter; + cmd->device.hlid = wlvif->dev_hlid; + cmd->device.session = wlvif->session_counter; wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d", cmd->role_id, cmd->device.hlid, cmd->device.session); @@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl) err_hlid: /* clear links on error */ - __clear_bit(wl->dev_hlid, wl->links_map); - wl->dev_hlid = WL12XX_INVALID_LINK_ID; - + wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); out_free: kfree(cmd); @@ -513,12 +539,13 @@ out: return ret; } -int wl12xx_cmd_role_stop_dev(struct wl1271 *wl) +static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct wl12xx_cmd_role_stop *cmd; int ret; - if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID)) + if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID)) return -EINVAL; cmd = 
kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl) wl1271_debug(DEBUG_CMD, "cmd role stop dev"); - cmd->role_id = wl->dev_role_id; + cmd->role_id = wlvif->dev_role_id; cmd->disc_type = DISCONNECT_IMMEDIATE; cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); @@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl) goto out_free; } - wl12xx_free_link(wl, &wl->dev_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); out_free: kfree(cmd); @@ -554,8 +581,9 @@ out: return ret; } -int wl12xx_cmd_role_start_sta(struct wl1271 *wl) +int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_cmd_role_start *cmd; int ret; @@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id); + wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id); - cmd->role_id = wl->role_id; - if (wl->band == IEEE80211_BAND_5GHZ) + cmd->role_id = wlvif->role_id; + if (wlvif->band == IEEE80211_BAND_5GHZ) cmd->band = WL12XX_BAND_5GHZ; - cmd->channel = wl->channel; - cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set); - cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int); + cmd->channel = wlvif->channel; + cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); + cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int); cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY; - cmd->sta.ssid_len = wl->ssid_len; - memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len); - memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN); - cmd->sta.local_rates = cpu_to_le32(wl->rate_set); + cmd->sta.ssid_len = wlvif->ssid_len; + memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len); + memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN); + cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); - if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) { - ret = wl12xx_allocate_link(wl, &wl->sta_hlid); + if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); if (ret) goto out_free; } - cmd->sta.hlid = wl->sta_hlid; - cmd->sta.session = wl12xx_get_new_session_id(wl); - cmd->sta.remote_rates = cpu_to_le32(wl->rate_set); + cmd->sta.hlid = wlvif->sta.hlid; + cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif); + cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set); wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " "basic_rate_set: 0x%x, remote_rates: 0x%x", - wl->role_id, cmd->sta.hlid, cmd->sta.session, - wl->basic_rate_set, wl->rate_set); + wlvif->role_id, cmd->sta.hlid, cmd->sta.session, + wlvif->basic_rate_set, wlvif->rate_set); ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); if (ret < 0) { @@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl) err_hlid: /* clear links on error. 
*/ - wl12xx_free_link(wl, &wl->sta_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); out_free: kfree(cmd); @@ -613,12 +641,12 @@ out: } /* use this function to stop ibss as well */ -int wl12xx_cmd_role_stop_sta(struct wl1271 *wl) +int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl12xx_cmd_role_stop *cmd; int ret; - if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID)) + if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)) return -EINVAL; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id); + wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id); - cmd->role_id = wl->role_id; + cmd->role_id = wlvif->role_id; cmd->disc_type = DISCONNECT_IMMEDIATE; cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); @@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl) goto out_free; } - wl12xx_free_link(wl, &wl->sta_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); out_free: kfree(cmd); @@ -648,16 +676,17 @@ out: return ret; } -int wl12xx_cmd_role_start_ap(struct wl1271 *wl) +int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl12xx_cmd_role_start *cmd; - struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; int ret; - wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id); + wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); /* trying to use hidden SSID with an old hostapd version */ - if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) { + if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) { wl1271_error("got a null SSID from beacon/bss"); ret = -EINVAL; goto out; @@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl) goto out; } - ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid); + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid); if (ret < 0) goto out_free; - ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid); + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid); if (ret < 0) goto out_free_global; - cmd->role_id = wl->role_id; + cmd->role_id = wlvif->role_id; cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); cmd->ap.bss_index = WL1271_AP_BSS_INDEX; - cmd->ap.global_hlid = wl->ap_global_hlid; - cmd->ap.broadcast_hlid = wl->ap_bcast_hlid; - cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set); - cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int); + cmd->ap.global_hlid = wlvif->ap.global_hlid; + cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid; + cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); + cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int); cmd->ap.dtim_interval = bss_conf->dtim_period; cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP; - cmd->channel = wl->channel; + cmd->channel = wlvif->channel; if (!bss_conf->hidden_ssid) { /* take the SSID from the beacon for backward compatibility */ cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC; - cmd->ap.ssid_len = wl->ssid_len; - memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len); + cmd->ap.ssid_len = wlvif->ssid_len; + memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len); } else { cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN; cmd->ap.ssid_len = bss_conf->ssid_len; @@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl) cmd->ap.local_rates = cpu_to_le32(0xffffffff); - switch (wl->band) { + switch (wlvif->band) { 
case IEEE80211_BAND_2GHZ: cmd->band = RADIO_BAND_2_4GHZ; break; @@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl) cmd->band = RADIO_BAND_5GHZ; break; default: - wl1271_warning("ap start - unknown band: %d", (int)wl->band); + wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); cmd->band = RADIO_BAND_2_4GHZ; break; } @@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl) goto out_free; out_free_bcast: - wl12xx_free_link(wl, &wl->ap_bcast_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); out_free_global: - wl12xx_free_link(wl, &wl->ap_global_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); out_free: kfree(cmd); @@ -735,7 +764,7 @@ out: return ret; } -int wl12xx_cmd_role_stop_ap(struct wl1271 *wl) +int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl12xx_cmd_role_stop *cmd; int ret; @@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id); + wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id); - cmd->role_id = wl->role_id; + cmd->role_id = wlvif->role_id; ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0); if (ret < 0) { @@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl) goto out_free; } - wl12xx_free_link(wl, &wl->ap_bcast_hlid); - wl12xx_free_link(wl, &wl->ap_global_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); out_free: kfree(cmd); @@ -766,10 +795,11 @@ out: return ret; } -int wl12xx_cmd_role_start_ibss(struct wl1271 *wl) +int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_cmd_role_start *cmd; - struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; int ret; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id); + wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id); - cmd->role_id = wl->role_id; - if (wl->band == IEEE80211_BAND_5GHZ) + cmd->role_id = wlvif->role_id; + if (wlvif->band == IEEE80211_BAND_5GHZ) cmd->band = WL12XX_BAND_5GHZ; - cmd->channel = wl->channel; - cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set); - cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int); + cmd->channel = wlvif->channel; + cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); + cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int); cmd->ibss.dtim_interval = bss_conf->dtim_period; cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY; - cmd->ibss.ssid_len = wl->ssid_len; - memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len); - memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN); - cmd->sta.local_rates = cpu_to_le32(wl->rate_set); + cmd->ibss.ssid_len = wlvif->ssid_len; + memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len); + memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN); + cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); - if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) { - ret = wl12xx_allocate_link(wl, &wl->sta_hlid); + if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); if (ret) goto out_free; } - cmd->ibss.hlid = wl->sta_hlid; - cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set); + cmd->ibss.hlid = wlvif->sta.hlid; + cmd->ibss.remote_rates = 
cpu_to_le32(wlvif->rate_set); wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " "basic_rate_set: 0x%x, remote_rates: 0x%x", - wl->role_id, cmd->sta.hlid, cmd->sta.session, - wl->basic_rate_set, wl->rate_set); + wlvif->role_id, cmd->sta.hlid, cmd->sta.session, + wlvif->basic_rate_set, wlvif->rate_set); - wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid); + wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM", + vif->bss_conf.bssid); ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); if (ret < 0) { @@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl) err_hlid: /* clear links on error. */ - wl12xx_free_link(wl, &wl->sta_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); out_free: kfree(cmd); @@ -962,7 +993,8 @@ out: return ret; } -int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) +int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ps_mode) { struct wl1271_cmd_ps_params *ps_params = NULL; int ret = 0; @@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) goto out; } - ps_params->role_id = wl->role_id; + ps_params->role_id = wlvif->role_id; ps_params->ps_mode = ps_mode; ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, @@ -1030,7 +1062,7 @@ out: return ret; } -int wl1271_cmd_build_null_data(struct wl1271 *wl) +int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct sk_buff *skb = NULL; int size; @@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl) int ret = -ENOMEM; - if (wl->bss_type == BSS_TYPE_IBSS) { + if (wlvif->bss_type == BSS_TYPE_IBSS) { size = sizeof(struct wl12xx_null_data_template); ptr = NULL; } else { - skb = ieee80211_nullfunc_get(wl->hw, wl->vif); + skb = ieee80211_nullfunc_get(wl->hw, + wl12xx_wlvif_to_vif(wlvif)); if (!skb) goto out; size = skb->len; @@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl) } ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0, - wl->basic_rate); + wlvif->basic_rate); out: dev_kfree_skb(skb); @@ -1061,19 +1094,21 @@ out: } -int wl1271_cmd_build_klv_null_data(struct wl1271 *wl) +int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct sk_buff *skb = NULL; int ret = -ENOMEM; - skb = ieee80211_nullfunc_get(wl->hw, wl->vif); + skb = ieee80211_nullfunc_get(wl->hw, vif); if (!skb) goto out; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, skb->data, skb->len, CMD_TEMPL_KLV_IDX_NULL_DATA, - wl->basic_rate); + wlvif->basic_rate); out: dev_kfree_skb(skb); @@ -1084,32 +1119,35 @@ out: } -int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 aid) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct sk_buff *skb; int ret = 0; - skb = ieee80211_pspoll_get(wl->hw, wl->vif); + skb = ieee80211_pspoll_get(wl->hw, vif); if (!skb) goto out; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data, - skb->len, 0, wl->basic_rate_set); + skb->len, 0, wlvif->basic_rate_set); out: dev_kfree_skb(skb); return ret; } -int wl1271_cmd_build_probe_req(struct wl1271 *wl, +int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct sk_buff *skb; int ret; u32 rate; - skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, + skb = 
ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, ie, ie_len); if (!skb) { ret = -ENOMEM; @@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len); - rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); if (band == IEEE80211_BAND_2GHZ) ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, skb->data, skb->len, 0, rate); @@ -1132,20 +1170,22 @@ out: } struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct sk_buff *skb) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); int ret; u32 rate; if (!skb) - skb = ieee80211_ap_probereq_get(wl->hw, wl->vif); + skb = ieee80211_ap_probereq_get(wl->hw, vif); if (!skb) goto out; wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len); - rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]); - if (wl->band == IEEE80211_BAND_2GHZ) + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]); + if (wlvif->band == IEEE80211_BAND_2GHZ) ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, skb->data, skb->len, 0, rate); else @@ -1159,9 +1199,11 @@ out: return skb; } -int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr) +int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif, + __be32 ip_addr) { int ret; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_arp_rsp_template tmpl; struct ieee80211_hdr_3addr *hdr; struct arphdr *arp_hdr; @@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr) hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_TODS); - memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN); - memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN); + memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); + memcpy(hdr->addr2, vif->addr, ETH_ALEN); memset(hdr->addr3, 0xff, ETH_ALEN); /* llc layer */ @@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr) arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY); /* arp payload */ - memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN); + memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN); tmpl.sender_ip = ip_addr; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP, &tmpl, sizeof(tmpl), 0, - wl->basic_rate); + wlvif->basic_rate); return ret; } -int wl1271_build_qos_null_data(struct wl1271 *wl) +int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_qos_hdr template; memset(&template, 0, sizeof(template)); - memcpy(template.addr1, wl->bssid, ETH_ALEN); - memcpy(template.addr2, wl->mac_addr, ETH_ALEN); - memcpy(template.addr3, wl->bssid, ETH_ALEN); + memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN); + memcpy(template.addr2, vif->addr, ETH_ALEN); + memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN); template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | @@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl) return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template, sizeof(template), 0, - wl->basic_rate); + wlvif->basic_rate); } int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid) @@ -1253,7 +1296,8 @@ out: return ret; } -int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 
action, u8 id, u8 key_type, u8 key_size, const u8 *key, const u8 *addr, u32 tx_seq_32, u16 tx_seq_16) { @@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, int ret = 0; /* hlid might have already been deleted */ - if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) + if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) return 0; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, goto out; } - cmd->hlid = wl->sta_hlid; + cmd->hlid = wlvif->sta.hlid; if (key_type == KEY_WEP) cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; @@ -1321,9 +1365,10 @@ out: * TODO: merge with sta/ibss into 1 set_key function. * note there are slight diffs */ -int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, - u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, - u16 tx_seq_16) +int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) { struct wl1271_cmd_set_keys *cmd; int ret = 0; @@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, if (!cmd) return -ENOMEM; - if (hlid == wl->ap_bcast_hlid) { + if (hlid == wlvif->ap.bcast_hlid) { if (key_type == KEY_WEP) lid_type = WEP_DEFAULT_LID_TYPE; else @@ -1411,7 +1456,8 @@ out: return ret; } -int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid) +int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, u8 hlid) { struct wl12xx_cmd_add_peer *cmd; int i, ret; @@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid) else cmd->psd_type[i] = WL1271_PSD_LEGACY; - sta_rates = sta->supp_rates[wl->band]; + sta_rates = sta->supp_rates[wlvif->band]; if (sta->ht_cap.ht_supported) sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET; cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, - wl->band)); + wlvif->band)); wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", cmd->supported_rates, sta->uapsd_queues); @@ -1584,12 +1630,13 @@ out: return ret; } -static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id) +static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 role_id) { struct wl12xx_cmd_roc *cmd; int ret = 0; - wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id); + wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id); if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID)) return -EINVAL; @@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id) } cmd->role_id = role_id; - cmd->channel = wl->channel; - switch (wl->band) { + cmd->channel = wlvif->channel; + switch (wlvif->band) { case IEEE80211_BAND_2GHZ: cmd->band = RADIO_BAND_2_4GHZ; break; @@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id) cmd->band = RADIO_BAND_5GHZ; break; default: - wl1271_error("roc - unknown band: %d", (int)wl->band); + wl1271_error("roc - unknown band: %d", (int)wlvif->band); ret = -EINVAL; goto out_free; } @@ -1657,14 +1704,14 @@ out: return ret; } -int wl12xx_roc(struct wl1271 *wl, u8 role_id) +int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id) { int ret = 0; if (WARN_ON(test_bit(role_id, wl->roc_map))) return 0; - ret = wl12xx_cmd_roc(wl, role_id); + ret = wl12xx_cmd_roc(wl, wlvif, role_id); if (ret < 0) goto out; @@ -1753,3 +1800,53 @@ 
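wl12xx_cmd_role_start_ap() above grabs two firmware links and, on any failure, releases them in reverse order before freeing the command buffer, the usual kernel goto-unwind idiom now expressed through the shared wl12xx_allocate_link()/wl12xx_free_link() helpers. A condensed sketch of that error-handling shape, using the helpers and command constants from the hunks above but with the command payload setup trimmed away:

static int example_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        struct wl12xx_cmd_role_start *cmd;
        int ret;

        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
        if (ret < 0)
                goto out_free;

        ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
        if (ret < 0)
                goto out_free_global;

        ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
        if (ret < 0)
                goto out_free_bcast;

        goto out_free;          /* success: keep both links, free only the buffer */

out_free_bcast:
        wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
out_free_global:
        wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
        kfree(cmd);
        return ret;
}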
out_free: out: return ret; } + +/* start dev role and roc on its channel */ +int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS))) + return -EINVAL; + + ret = wl12xx_cmd_role_start_dev(wl, wlvif); + if (ret < 0) + goto out; + + ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id); + if (ret < 0) + goto out_stop; + + return 0; + +out_stop: + wl12xx_cmd_role_stop_dev(wl, wlvif); +out: + return ret; +} + +/* croc dev hlid, and stop the role */ +int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS))) + return -EINVAL; + + /* flush all pending packets */ + wl1271_tx_work_locked(wl); + + if (test_bit(wlvif->dev_role_id, wl->roc_map)) { + ret = wl12xx_croc(wl, wlvif->dev_role_id); + if (ret < 0) + goto out; + } + + ret = wl12xx_cmd_role_stop_dev(wl, wlvif); + if (ret < 0) + goto out; +out: + return ret; +} diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h index b7bd42769aa..3f7d0b93c24 100644 --- a/drivers/net/wireless/wl12xx/cmd.h +++ b/drivers/net/wireless/wl12xx/cmd.h @@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl); int wl1271_cmd_radio_parms(struct wl1271 *wl); int wl128x_cmd_radio_parms(struct wl1271 *wl); int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); -int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id); +int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, + u8 *role_id); int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); -int wl12xx_cmd_role_start_dev(struct wl1271 *wl); -int wl12xx_cmd_role_stop_dev(struct wl1271 *wl); -int wl12xx_cmd_role_start_sta(struct wl1271 *wl); -int wl12xx_cmd_role_stop_sta(struct wl1271 *wl); -int wl12xx_cmd_role_start_ap(struct wl1271 *wl); -int wl12xx_cmd_role_stop_ap(struct wl1271 *wl); -int wl12xx_cmd_role_start_ibss(struct wl1271 *wl); +int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); -int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); +int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ps_mode); int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, size_t len); int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, void *buf, size_t buf_len, int index, u32 rates); -int wl1271_cmd_build_null_data(struct wl1271 *wl); -int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); -int wl1271_cmd_build_probe_req(struct wl1271 *wl, +int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 aid); +int wl1271_cmd_build_probe_req(struct wl1271 
*wl, struct wl12xx_vif *wlvif, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band); struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct sk_buff *skb); -int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); -int wl1271_build_qos_null_data(struct wl1271 *wl); -int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); +int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif, + __be32 ip_addr); +int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif); +int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, + struct wl12xx_vif *wlvif); int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid); -int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, const u8 *addr, u32 tx_seq_32, u16 tx_seq_16); -int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, u16 tx_seq_16); int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid); -int wl12xx_roc(struct wl1271 *wl, u8 role_id); +int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id); int wl12xx_croc(struct wl1271 *wl, u8 role_id); -int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid); +int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, u8 hlid); int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid); int wl12xx_cmd_config_fwlog(struct wl1271 *wl); int wl12xx_cmd_start_fwlog(struct wl1271 *wl); @@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); int wl12xx_cmd_channel_switch(struct wl1271 *wl, struct ieee80211_channel_switch *ch_switch); int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl); +int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 *hlid); +void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid); enum wl1271_commands { CMD_INTERROGATE = 1, /*use this to read information elements*/ diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h index 04bb8fbf93f..1bcfb017058 100644 --- a/drivers/net/wireless/wl12xx/conf.h +++ b/drivers/net/wireless/wl12xx/conf.h @@ -440,6 +440,10 @@ struct conf_rx_settings { CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ CONF_HW_BIT_RATE_54MBPS) +#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ + CONF_HW_BIT_RATE_11MBPS) + #define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \ CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h new file mode 100644 index 00000000000..b85fd8c41e8 --- /dev/null +++ b/drivers/net/wireless/wl12xx/debug.h @@ -0,0 +1,101 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2011 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho <coelho@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __DEBUG_H__ +#define __DEBUG_H__ + +#include <linux/bitops.h> +#include <linux/printk.h> + +#define DRIVER_NAME "wl12xx" +#define DRIVER_PREFIX DRIVER_NAME ": " + +enum { + DEBUG_NONE = 0, + DEBUG_IRQ = BIT(0), + DEBUG_SPI = BIT(1), + DEBUG_BOOT = BIT(2), + DEBUG_MAILBOX = BIT(3), + DEBUG_TESTMODE = BIT(4), + DEBUG_EVENT = BIT(5), + DEBUG_TX = BIT(6), + DEBUG_RX = BIT(7), + DEBUG_SCAN = BIT(8), + DEBUG_CRYPT = BIT(9), + DEBUG_PSM = BIT(10), + DEBUG_MAC80211 = BIT(11), + DEBUG_CMD = BIT(12), + DEBUG_ACX = BIT(13), + DEBUG_SDIO = BIT(14), + DEBUG_FILTERS = BIT(15), + DEBUG_ADHOC = BIT(16), + DEBUG_AP = BIT(17), + DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), + DEBUG_ALL = ~0, +}; + +extern u32 wl12xx_debug_level; + +#define DEBUG_DUMP_LIMIT 1024 + +#define wl1271_error(fmt, arg...) \ + pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) + +#define wl1271_warning(fmt, arg...) \ + pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + +#define wl1271_notice(fmt, arg...) \ + pr_info(DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_info(fmt, arg...) \ + pr_info(DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_debug(level, fmt, arg...) \ + do { \ + if (level & wl12xx_debug_level) \ + pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ + } while (0) + +/* TODO: use pr_debug_hex_dump when it becomes available */ +#define wl1271_dump(level, prefix, buf, len) \ + do { \ + if (level & wl12xx_debug_level) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + 0); \ + } while (0) + +#define wl1271_dump_ascii(level, prefix, buf, len) \ + do { \ + if (level & wl12xx_debug_level) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + true); \ + } while (0) + +#endif /* __DEBUG_H__ */ diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c index 3999fd52830..15eb3a9c30c 100644 --- a/drivers/net/wireless/wl12xx/debugfs.c +++ b/drivers/net/wireless/wl12xx/debugfs.c @@ -27,6 +27,7 @@ #include <linux/slab.h> #include "wl12xx.h" +#include "debug.h" #include "acx.h" #include "ps.h" #include "io.h" @@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, { struct wl1271 *wl = file->private_data; int res = 0; - char buf[1024]; + ssize_t ret; + char *buf; + +#define DRIVER_STATE_BUF_LEN 1024 + + buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; mutex_lock(&wl->mutex); #define DRIVER_STATE_PRINT(x, fmt) \ - (res += scnprintf(buf + res, sizeof(buf) - res,\ + (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\ #x " = " fmt "\n", wl->x)) #define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld") @@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, DRIVER_STATE_PRINT_INT(tx_results_count); DRIVER_STATE_PRINT_LHEX(flags); DRIVER_STATE_PRINT_INT(tx_blocks_freed); - DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb); 
DRIVER_STATE_PRINT_INT(rx_counter); - DRIVER_STATE_PRINT_INT(session_counter); DRIVER_STATE_PRINT_INT(state); - DRIVER_STATE_PRINT_INT(bss_type); DRIVER_STATE_PRINT_INT(channel); - DRIVER_STATE_PRINT_HEX(rate_set); - DRIVER_STATE_PRINT_HEX(basic_rate_set); - DRIVER_STATE_PRINT_HEX(basic_rate); DRIVER_STATE_PRINT_INT(band); - DRIVER_STATE_PRINT_INT(beacon_int); - DRIVER_STATE_PRINT_INT(psm_entry_retry); - DRIVER_STATE_PRINT_INT(ps_poll_failures); DRIVER_STATE_PRINT_INT(power_level); - DRIVER_STATE_PRINT_INT(rssi_thold); - DRIVER_STATE_PRINT_INT(last_rssi_event); DRIVER_STATE_PRINT_INT(sg_enabled); DRIVER_STATE_PRINT_INT(enable_11a); DRIVER_STATE_PRINT_INT(noise); - DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]); - DRIVER_STATE_PRINT_INT(last_tx_hlid); - DRIVER_STATE_PRINT_INT(ba_support); - DRIVER_STATE_PRINT_HEX(ba_rx_bitmap); DRIVER_STATE_PRINT_HEX(ap_fw_ps_map); DRIVER_STATE_PRINT_LHEX(ap_ps_map); DRIVER_STATE_PRINT_HEX(quirks); @@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, #undef DRIVER_STATE_PRINT_LHEX #undef DRIVER_STATE_PRINT_STR #undef DRIVER_STATE_PRINT +#undef DRIVER_STATE_BUF_LEN mutex_unlock(&wl->mutex); - return simple_read_from_buffer(user_buf, count, ppos, buf, res); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, res); + kfree(buf); + return ret; } static const struct file_operations driver_state_ops = { @@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = { .llseek = default_llseek, }; +static ssize_t vifs_state_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + int ret, res = 0; + const int buf_size = 4096; + char *buf; + char tmp_buf[64]; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&wl->mutex); + +#define VIF_STATE_PRINT(x, fmt) \ + (res += scnprintf(buf + res, buf_size - res, \ + #x " = " fmt "\n", wlvif->x)) + +#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld") +#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d") +#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s") +#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx") +#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx") +#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x") + +#define VIF_STATE_PRINT_NSTR(x, len) \ + do { \ + memset(tmp_buf, 0, sizeof(tmp_buf)); \ + memcpy(tmp_buf, wlvif->x, \ + min_t(u8, len, sizeof(tmp_buf) - 1)); \ + res += scnprintf(buf + res, buf_size - res, \ + #x " = %s\n", tmp_buf); \ + } while (0) + + wl12xx_for_each_wlvif(wl, wlvif) { + VIF_STATE_PRINT_INT(role_id); + VIF_STATE_PRINT_INT(bss_type); + VIF_STATE_PRINT_LHEX(flags); + VIF_STATE_PRINT_INT(p2p); + VIF_STATE_PRINT_INT(dev_role_id); + VIF_STATE_PRINT_INT(dev_hlid); + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + VIF_STATE_PRINT_INT(sta.hlid); + VIF_STATE_PRINT_INT(sta.ba_rx_bitmap); + VIF_STATE_PRINT_INT(sta.basic_rate_idx); + VIF_STATE_PRINT_INT(sta.ap_rate_idx); + VIF_STATE_PRINT_INT(sta.p2p_rate_idx); + } else { + VIF_STATE_PRINT_INT(ap.global_hlid); + VIF_STATE_PRINT_INT(ap.bcast_hlid); + VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]); + VIF_STATE_PRINT_INT(ap.mgmt_rate_idx); + VIF_STATE_PRINT_INT(ap.bcast_rate_idx); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]); + } + VIF_STATE_PRINT_INT(last_tx_hlid); + 
VIF_STATE_PRINT_LHEX(links_map[0]); + VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len); + VIF_STATE_PRINT_INT(band); + VIF_STATE_PRINT_INT(channel); + VIF_STATE_PRINT_HEX(bitrate_masks[0]); + VIF_STATE_PRINT_HEX(bitrate_masks[1]); + VIF_STATE_PRINT_HEX(basic_rate_set); + VIF_STATE_PRINT_HEX(basic_rate); + VIF_STATE_PRINT_HEX(rate_set); + VIF_STATE_PRINT_INT(beacon_int); + VIF_STATE_PRINT_INT(default_key); + VIF_STATE_PRINT_INT(aid); + VIF_STATE_PRINT_INT(session_counter); + VIF_STATE_PRINT_INT(ps_poll_failures); + VIF_STATE_PRINT_INT(psm_entry_retry); + VIF_STATE_PRINT_INT(power_level); + VIF_STATE_PRINT_INT(rssi_thold); + VIF_STATE_PRINT_INT(last_rssi_event); + VIF_STATE_PRINT_INT(ba_support); + VIF_STATE_PRINT_INT(ba_allowed); + VIF_STATE_PRINT_LLHEX(tx_security_seq); + VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); + } + +#undef VIF_STATE_PRINT_INT +#undef VIF_STATE_PRINT_LONG +#undef VIF_STATE_PRINT_HEX +#undef VIF_STATE_PRINT_LHEX +#undef VIF_STATE_PRINT_LLHEX +#undef VIF_STATE_PRINT_STR +#undef VIF_STATE_PRINT_NSTR +#undef VIF_STATE_PRINT + + mutex_unlock(&wl->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, res); + kfree(buf); + return ret; +} + +static const struct file_operations vifs_state_ops = { + .read = vifs_state_read, + .open = wl1271_open_file_generic, + .llseek = default_llseek, +}; + static ssize_t dtim_interval_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file, size_t count, loff_t *ppos) { struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; unsigned long value; int ret; @@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file, if (ret < 0) goto out; - wl1271_recalc_rx_streaming(wl); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_recalc_rx_streaming(wl, wlvif); + } wl1271_ps_elp_sleep(wl); out: @@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file, size_t count, loff_t *ppos) { struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; unsigned long value; int ret; @@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file, if (ret < 0) goto out; - wl1271_recalc_rx_streaming(wl); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_recalc_rx_streaming(wl, wlvif); + } wl1271_ps_elp_sleep(wl); out: @@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file, size_t count, loff_t *ppos) { struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; char buf[10]; size_t len; unsigned long value; @@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file, if (ret < 0) goto out; - ret = wl1271_acx_beacon_filter_opt(wl, !!value); + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); + } wl1271_ps_elp_sleep(wl); out: @@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(gpio_power, rootdir); DEBUGFS_ADD(start_recovery, rootdir); DEBUGFS_ADD(driver_state, rootdir); + DEBUGFS_ADD(vifs_state, rootdir); DEBUGFS_ADD(dtim_interval, rootdir); DEBUGFS_ADD(beacon_interval, rootdir); DEBUGFS_ADD(beacon_filtering, rootdir); diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c index 674ad2a9e40..d3280df68f5 100644 --- a/drivers/net/wireless/wl12xx/event.c +++ b/drivers/net/wireless/wl12xx/event.c @@ -22,6 +22,7 @@ */ #include "wl12xx.h" +#include "debug.h" #include "reg.h" #include "io.h" #include "event.h" @@ -31,12 +32,16 @@ 
void wl1271_pspoll_work(struct work_struct *work) { + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; struct delayed_work *dwork; struct wl1271 *wl; int ret; dwork = container_of(work, struct delayed_work, work); - wl = container_of(dwork, struct wl1271, pspoll_work); + wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work); + vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv); + wl = wlvif->wl; wl1271_debug(DEBUG_EVENT, "pspoll work"); @@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work) if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags)) + if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags)) goto out; - if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) goto out; /* @@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work) if (ret < 0) goto out; - wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true); + wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE, + wlvif->basic_rate, true); wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); }; -static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl) +static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int delay = wl->conf.conn.ps_poll_recovery_period; int ret; - wl->ps_poll_failures++; - if (wl->ps_poll_failures == 1) + wlvif->ps_poll_failures++; + if (wlvif->ps_poll_failures == 1) wl1271_info("AP with dysfunctional ps-poll, " "trying to work around it."); /* force active mode receive data from the AP */ - if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { - ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, - wl->basic_rate, true); + if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { + ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE, + wlvif->basic_rate, true); if (ret < 0) return; - set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); - ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work, + set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags); + ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work, msecs_to_jiffies(delay)); } @@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl) } static int wl1271_event_ps_report(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct event_mailbox *mbox, bool *beacon_loss) { @@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl, case EVENT_ENTER_POWER_SAVE_FAIL: wl1271_debug(DEBUG_PSM, "PSM entry failed"); - if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { + if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { /* remain in active mode */ - wl->psm_entry_retry = 0; + wlvif->psm_entry_retry = 0; break; } - if (wl->psm_entry_retry < total_retries) { - wl->psm_entry_retry++; - ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, - wl->basic_rate, true); + if (wlvif->psm_entry_retry < total_retries) { + wlvif->psm_entry_retry++; + ret = wl1271_ps_set_mode(wl, wlvif, + STATION_POWER_SAVE_MODE, + wlvif->basic_rate, true); } else { wl1271_info("No ack to nullfunc from AP."); - wl->psm_entry_retry = 0; + wlvif->psm_entry_retry = 0; *beacon_loss = true; } break; case EVENT_ENTER_POWER_SAVE_SUCCESS: - wl->psm_entry_retry = 0; - - /* enable beacon filtering */ - ret = wl1271_acx_beacon_filter_opt(wl, true); - if (ret < 0) - break; + wlvif->psm_entry_retry = 0; /* * BET has only a minor effect in 5GHz and masks * channel switch IEs, so we only enable BET on 2.4GHz */ - if (wl->band == IEEE80211_BAND_2GHZ) + if 
(wlvif->band == IEEE80211_BAND_2GHZ) /* enable beacon early termination */ - ret = wl1271_acx_bet_enable(wl, true); + ret = wl1271_acx_bet_enable(wl, wlvif, true); - if (wl->ps_compl) { - complete(wl->ps_compl); - wl->ps_compl = NULL; + if (wlvif->ps_compl) { + complete(wlvif->ps_compl); + wlvif->ps_compl = NULL; } break; default: @@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl, } static void wl1271_event_rssi_trigger(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct event_mailbox *mbox) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); enum nl80211_cqm_rssi_threshold_event event; s8 metric = mbox->rssi_snr_trigger_metric[0]; wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric); - if (metric <= wl->rssi_thold) + if (metric <= wlvif->rssi_thold) event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; else event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; - if (event != wl->last_rssi_event) - ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL); - wl->last_rssi_event = event; + if (event != wlvif->last_rssi_event) + ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL); + wlvif->last_rssi_event = event; } -static void wl1271_stop_ba_event(struct wl1271 *wl) +static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif) { - if (wl->bss_type != BSS_TYPE_AP_BSS) { - if (!wl->ba_rx_bitmap) + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + if (wlvif->bss_type != BSS_TYPE_AP_BSS) { + if (!wlvif->sta.ba_rx_bitmap) return; - ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, - wl->bssid); + ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap, + vif->bss_conf.bssid); } else { - int i; + u8 hlid; struct wl1271_link *lnk; - for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) { - lnk = &wl->links[i]; - if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap) + for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, + WL12XX_MAX_LINKS) { + lnk = &wl->links[hlid]; + if (!lnk->ba_bitmap) continue; - ieee80211_stop_rx_ba_session(wl->vif, + ieee80211_stop_rx_ba_session(vif, lnk->ba_bitmap, lnk->addr); } @@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl) static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl, u8 enable) { + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; + if (enable) { /* disable dynamic PS when requested by the firmware */ - ieee80211_disable_dyn_ps(wl->vif); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_disable_dyn_ps(vif); + } set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); } else { - ieee80211_enable_dyn_ps(wl->vif); clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); - wl1271_recalc_rx_streaming(wl); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_enable_dyn_ps(vif); + wl1271_recalc_rx_streaming(wl, wlvif); + } } } @@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox) static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) { + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; int ret; u32 vector; bool beacon_loss = false; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); bool disconnect_sta = false; unsigned long sta_bitmap = 0; @@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) wl1271_debug(DEBUG_EVENT, "status: 0x%x", mbox->scheduled_scan_status); - wl1271_scan_stm(wl); + wl1271_scan_stm(wl, wl->scan_vif); } if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { @@ -248,13 +267,12 @@ static int 
wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT " "(status 0x%0x)", mbox->scheduled_scan_status); if (wl->sched_scanning) { - wl1271_scan_sched_scan_stop(wl); ieee80211_sched_scan_stopped(wl->hw); + wl->sched_scanning = false; } } - if (vector & SOFT_GEMINI_SENSE_EVENT_ID && - wl->bss_type == BSS_TYPE_STA_BSS) + if (vector & SOFT_GEMINI_SENSE_EVENT_ID) wl12xx_event_soft_gemini_sense(wl, mbox->soft_gemini_sense_info); @@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) * BSS_LOSE_EVENT, beacon loss has to be reported to the stack. * */ - if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) { + if (vector & BSS_LOSE_EVENT_ID) { + /* TODO: check for multi-role */ wl1271_info("Beacon loss detected."); /* indicate to the stack, that beacons have been lost */ beacon_loss = true; } - if ((vector & PS_REPORT_EVENT_ID) && !is_ap) { + if (vector & PS_REPORT_EVENT_ID) { wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); - ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); - if (ret < 0) - return ret; + wl12xx_for_each_wlvif_sta(wl, wlvif) { + ret = wl1271_event_ps_report(wl, wlvif, + mbox, &beacon_loss); + if (ret < 0) + return ret; + } } - if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap) - wl1271_event_pspoll_delivery_fail(wl); + if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_event_pspoll_delivery_fail(wl, wlvif); + } if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { + /* TODO: check actual multi-role support */ wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT"); - if (wl->vif) - wl1271_event_rssi_trigger(wl, mbox); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_event_rssi_trigger(wl, wlvif, mbox); + } } - if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) { + if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) { + u8 role_id = mbox->role_id; wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. " - "ba_allowed = 0x%x", mbox->rx_ba_allowed); + "ba_allowed = 0x%x, role_id=%d", + mbox->rx_ba_allowed, role_id); - wl->ba_allowed = !!mbox->rx_ba_allowed; + wl12xx_for_each_wlvif(wl, wlvif) { + if (role_id != 0xff && role_id != wlvif->role_id) + continue; - if (wl->vif && !wl->ba_allowed) - wl1271_stop_ba_event(wl); + wlvif->ba_allowed = !!mbox->rx_ba_allowed; + if (!wlvif->ba_allowed) + wl1271_stop_ba_event(wl, wlvif); + } } - if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) { + if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) { wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. " "status = 0x%x", mbox->channel_switch_status); @@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) * 1) channel switch complete with status=0 * 2) channel switch failed status=1 */ - if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) && - (wl->vif)) - ieee80211_chswitch_done(wl->vif, - mbox->channel_switch_status ? false : true); + + /* TODO: configure only the relevant vif */ + wl12xx_for_each_wlvif_sta(wl, wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + bool success; + + if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, + &wl->flags)) + continue; + + success = mbox->channel_switch_status ? 
false : true; + ieee80211_chswitch_done(vif, success); + } } if ((vector & DUMMY_PACKET_EVENT_ID)) { wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); - if (wl->vif) - wl1271_tx_dummy_packet(wl); + wl1271_tx_dummy_packet(wl); } /* * "TX retries exceeded" has a different meaning according to mode. * In AP mode the offending station is disconnected. */ - if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) { + if (vector & MAX_TX_RETRY_EVENT_ID) { wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID"); sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded); disconnect_sta = true; } - if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) { + if (vector & INACTIVE_STA_EVENT_ID) { wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID"); sta_bitmap |= le16_to_cpu(mbox->sta_aging_status); disconnect_sta = true; } - if (is_ap && disconnect_sta) { + if (disconnect_sta) { u32 num_packets = wl->conf.tx.max_tx_retries; struct ieee80211_sta *sta; const u8 *addr; int h; - for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS); - h < AP_MAX_LINKS; - h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) { - if (!wl1271_is_active_sta(wl, h)) + for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) { + bool found = false; + /* find the ap vif connected to this sta */ + wl12xx_for_each_wlvif_ap(wl, wlvif) { + if (!test_bit(h, wlvif->ap.sta_hlid_map)) + continue; + found = true; + break; + } + if (!found) continue; + vif = wl12xx_wlvif_to_vif(wlvif); addr = wl->links[h].addr; rcu_read_lock(); - sta = ieee80211_find_sta(wl->vif, addr); + sta = ieee80211_find_sta(vif, addr); if (sta) { wl1271_debug(DEBUG_EVENT, "remove sta %d", h); ieee80211_report_low_ack(sta, num_packets); @@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) } } - if (wl->vif && beacon_loss) - ieee80211_connection_loss(wl->vif); + if (beacon_loss) + wl12xx_for_each_wlvif_sta(wl, wlvif) { + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_connection_loss(vif); + } return 0; } diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h index 49c1a0ede5b..1d878ba47bf 100644 --- a/drivers/net/wireless/wl12xx/event.h +++ b/drivers/net/wireless/wl12xx/event.h @@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl); int wl1271_event_handle(struct wl1271 *wl, u8 mbox); void wl1271_pspoll_work(struct work_struct *work); -/* Functions from main.c */ -bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid); - #endif diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c index 04db64c94e9..ca7ee59e450 100644 --- a/drivers/net/wireless/wl12xx/init.c +++ b/drivers/net/wireless/wl12xx/init.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/slab.h> +#include "debug.h" #include "init.h" #include "wl12xx_80211.h" #include "acx.h" @@ -33,7 +34,7 @@ #include "tx.h" #include "io.h" -int wl1271_sta_init_templates_config(struct wl1271 *wl) +int wl1271_init_templates_config(struct wl1271 *wl) { int ret, i; @@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl) ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, sizeof - (struct wl12xx_qos_null_data_template), + (struct ieee80211_qos_hdr), 0, WL1271_RATE_AUTOMATIC); if (ret < 0) return ret; @@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl) if (ret < 0) return ret; + /* + * Put very large empty placeholders for all templates. These + * reserve memory for later. 
+ */ + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL, + sizeof + (struct wl12xx_disconn_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL, - WL1271_CMD_TEMPL_DFLT_SIZE, i, - WL1271_RATE_AUTOMATIC); + sizeof(struct ieee80211_qos_hdr), + i, WL1271_RATE_AUTOMATIC); if (ret < 0) return ret; } @@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl) return 0; } -static int wl1271_ap_init_deauth_template(struct wl1271 *wl) +static int wl1271_ap_init_deauth_template(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct wl12xx_disconn_template *tmpl; int ret; @@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl) tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); - rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, tmpl, sizeof(*tmpl), 0, rate); @@ -123,8 +148,10 @@ out: return ret; } -static int wl1271_ap_init_null_template(struct wl1271 *wl) +static int wl1271_ap_init_null_template(struct wl1271 *wl, + struct ieee80211_vif *vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_hdr_3addr *nullfunc; int ret; u32 rate; @@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl) /* nullfunc->addr1 is filled by FW */ - memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN); - memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN); + memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); + memcpy(nullfunc->addr3, vif->addr, ETH_ALEN); - rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc, sizeof(*nullfunc), 0, rate); @@ -153,8 +180,10 @@ out: return ret; } -static int wl1271_ap_init_qos_null_template(struct wl1271 *wl) +static int wl1271_ap_init_qos_null_template(struct wl1271 *wl, + struct ieee80211_vif *vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_qos_hdr *qosnull; int ret; u32 rate; @@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl) /* qosnull->addr1 is filled by FW */ - memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN); - memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN); + memcpy(qosnull->addr2, vif->addr, ETH_ALEN); + memcpy(qosnull->addr3, vif->addr, ETH_ALEN); - rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull, sizeof(*qosnull), 0, rate); @@ -183,49 +212,6 @@ out: return ret; } -static int wl1271_ap_init_templates_config(struct wl1271 *wl) -{ - int ret; - - /* - * Put very large empty placeholders for all templates. These - * reserve memory for later. 
- */ - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL, - WL1271_CMD_TEMPL_MAX_SIZE, - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL, - WL1271_CMD_TEMPL_MAX_SIZE, - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL, - sizeof - (struct wl12xx_disconn_template), - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, - sizeof(struct wl12xx_null_data_template), - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, - sizeof - (struct wl12xx_qos_null_data_template), - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - return 0; -} - static int wl12xx_init_rx_config(struct wl1271 *wl) { int ret; @@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl) return 0; } -int wl1271_init_phy_config(struct wl1271 *wl) +static int wl12xx_init_phy_vif_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int ret; - ret = wl1271_acx_pd_threshold(wl); - if (ret < 0) - return ret; - - ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME); + ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME); if (ret < 0) return ret; - ret = wl1271_acx_service_period_timeout(wl); + ret = wl1271_acx_service_period_timeout(wl, wlvif); if (ret < 0) return ret; - ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold); + ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold); if (ret < 0) return ret; return 0; } -static int wl1271_init_beacon_filter(struct wl1271 *wl) +static int wl1271_init_sta_beacon_filter(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int ret; - /* disable beacon filtering at this stage */ - ret = wl1271_acx_beacon_filter_opt(wl, false); + ret = wl1271_acx_beacon_filter_table(wl, wlvif); if (ret < 0) return ret; - ret = wl1271_acx_beacon_filter_table(wl); + /* enable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); if (ret < 0) return ret; @@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl) return 0; } -static int wl1271_init_beacon_broadcast(struct wl1271 *wl) +static int wl1271_init_beacon_broadcast(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int ret; - ret = wl1271_acx_bcn_dtim_options(wl); + ret = wl1271_acx_bcn_dtim_options(wl, wlvif); if (ret < 0) return ret; @@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl) return 0; } -static int wl1271_sta_hw_init(struct wl1271 *wl) +/* generic sta initialization (non vif-specific) */ +static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; - if (wl->chip.id != CHIP_ID_1283_PG20) { - ret = wl1271_cmd_ext_radio_parms(wl); - if (ret < 0) - return ret; - } - /* PS config */ - ret = wl1271_acx_config_ps(wl); - if (ret < 0) - return ret; - - ret = wl1271_sta_init_templates_config(wl); - if (ret < 0) - return ret; - - ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0); - if (ret < 0) - return ret; - - /* Initialize connection monitoring thresholds */ - ret = wl1271_acx_conn_monit_params(wl, false); - if (ret < 0) - return ret; - - /* Beacon filtering */ - ret = wl1271_init_beacon_filter(wl); + ret = wl12xx_acx_config_ps(wl, wlvif); if (ret < 0) return ret; @@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl) if (ret < 0) return ret; - /* Beacons and broadcast settings */ - ret = 
wl1271_init_beacon_broadcast(wl); - if (ret < 0) - return ret; - - /* Configure for ELP power saving */ - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); - if (ret < 0) - return ret; - - /* Configure rssi/snr averaging weights */ - ret = wl1271_acx_rssi_snr_avg_weights(wl); - if (ret < 0) - return ret; - - ret = wl1271_acx_sta_rate_policies(wl); - if (ret < 0) - return ret; - - ret = wl12xx_acx_mem_cfg(wl); - if (ret < 0) - return ret; - - /* Configure the FW logger */ - ret = wl12xx_init_fwlog(wl); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) return ret; return 0; } -static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl) +static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl, + struct ieee80211_vif *vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret, i; /* disable all keep-alive templates */ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { - ret = wl1271_acx_keep_alive_config(wl, i, + ret = wl1271_acx_keep_alive_config(wl, wlvif, i, ACX_KEEP_ALIVE_TPL_INVALID); if (ret < 0) return ret; } /* disable the keep-alive feature */ - ret = wl1271_acx_keep_alive_mode(wl, false); + ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); if (ret < 0) return ret; return 0; } -static int wl1271_ap_hw_init(struct wl1271 *wl) +/* generic ap initialization (non vif-specific) */ +static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; - ret = wl1271_ap_init_templates_config(wl); - if (ret < 0) - return ret; - - /* Configure for power always on */ - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); - if (ret < 0) - return ret; - - ret = wl1271_init_ap_rates(wl); - if (ret < 0) - return ret; - - ret = wl1271_acx_ap_max_tx_retry(wl); - if (ret < 0) - return ret; - - ret = wl12xx_acx_mem_cfg(wl); - if (ret < 0) - return ret; - - /* initialize Tx power */ - ret = wl1271_acx_tx_power(wl, wl->power_level); + ret = wl1271_init_ap_rates(wl, wlvif); if (ret < 0) return ret; return 0; } -int wl1271_ap_init_templates(struct wl1271 *wl) +int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; - ret = wl1271_ap_init_deauth_template(wl); + ret = wl1271_ap_init_deauth_template(wl, wlvif); if (ret < 0) return ret; - ret = wl1271_ap_init_null_template(wl); + ret = wl1271_ap_init_null_template(wl, vif); if (ret < 0) return ret; - ret = wl1271_ap_init_qos_null_template(wl); + ret = wl1271_ap_init_qos_null_template(wl, vif); if (ret < 0) return ret; @@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl) * when operating as AP we want to receive external beacons for * configuring ERP protection. 
*/ - ret = wl1271_acx_beacon_filter_opt(wl, false); + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); if (ret < 0) return ret; return 0; } -static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl) +static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl, + struct ieee80211_vif *vif) { - return wl1271_ap_init_templates(wl); + return wl1271_ap_init_templates(wl, vif); } -int wl1271_init_ap_rates(struct wl1271 *wl) +int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int i, ret; struct conf_tx_rate_class rc; u32 supported_rates; - wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set); + wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", + wlvif->basic_rate_set); - if (wl->basic_rate_set == 0) + if (wlvif->basic_rate_set == 0) return -EINVAL; - rc.enabled_rates = wl->basic_rate_set; + rc.enabled_rates = wlvif->basic_rate_set; rc.long_retry_limit = 10; rc.short_retry_limit = 10; rc.aflags = 0; - ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE); + ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx); if (ret < 0) return ret; /* use the min basic rate for AP broadcast/multicast */ - rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); rc.short_retry_limit = 10; rc.long_retry_limit = 10; rc.aflags = 0; - ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE); + ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx); if (ret < 0) return ret; @@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl) * If the basic rates contain OFDM rates, use OFDM only * rates for unicast TX as well. Else use all supported rates. */ - if ((wl->basic_rate_set & CONF_TX_OFDM_RATES)) + if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES)) supported_rates = CONF_TX_OFDM_RATES; else supported_rates = CONF_TX_AP_ENABLED_RATES; @@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl) rc.short_retry_limit = 10; rc.long_retry_limit = 10; rc.aflags = 0; - ret = wl1271_acx_ap_rate_policy(wl, &rc, i); + ret = wl1271_acx_ap_rate_policy(wl, &rc, + wlvif->ap.ucast_rate_idx[i]); if (ret < 0) return ret; } @@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl) return 0; } -static int wl1271_set_ba_policies(struct wl1271 *wl) +static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) { /* Reset the BA RX indicators */ - wl->ba_rx_bitmap = 0; - wl->ba_allowed = true; + wlvif->ba_allowed = true; wl->ba_rx_session_count = 0; /* BA is supported in STA/AP modes */ - if (wl->bss_type != BSS_TYPE_AP_BSS && - wl->bss_type != BSS_TYPE_STA_BSS) { - wl->ba_support = false; + if (wlvif->bss_type != BSS_TYPE_AP_BSS && + wlvif->bss_type != BSS_TYPE_STA_BSS) { + wlvif->ba_support = false; return 0; } - wl->ba_support = true; + wlvif->ba_support = true; /* 802.11n initiator BA session setting */ - return wl12xx_acx_set_ba_initiator_policy(wl); + return wl12xx_acx_set_ba_initiator_policy(wl, wlvif); } int wl1271_chip_specific_init(struct wl1271 *wl) @@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl) if (wl->chip.id == CHIP_ID_1283_PG20) { u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE; - if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) + if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)) /* Enable SDIO padding */ host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK; @@ -575,39 +497,186 @@ out: return ret; } +/* vif-specific initialization */ +static int wl12xx_init_sta_role(struct wl1271 *wl, struct
wl12xx_vif *wlvif) +{ + int ret; -int wl1271_hw_init(struct wl1271 *wl) + ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0); + if (ret < 0) + return ret; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl, wlvif, false); + if (ret < 0) + return ret; + + /* Beacon filtering */ + ret = wl1271_init_sta_beacon_filter(wl, wlvif); + if (ret < 0) + return ret; + + /* Beacons and broadcast settings */ + ret = wl1271_init_beacon_broadcast(wl, wlvif); + if (ret < 0) + return ret; + + /* Configure rssi/snr averaging weights */ + ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif); + if (ret < 0) + return ret; + + return 0; +} + +/* vif-specific initialization */ +static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) { + int ret; + + ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); + if (ret < 0) + return ret; + + /* initialize Tx power */ + ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct conf_tx_ac_category *conf_ac; struct conf_tx_tid *conf_tid; + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int ret, i; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); - if (wl->chip.id == CHIP_ID_1283_PG20) - ret = wl128x_cmd_general_parms(wl); - else - ret = wl1271_cmd_general_parms(wl); + /* + * consider all existing roles before configuring psm. + * TODO: reconfigure on interface removal. + */ + if (!wl->ap_count) { + if (is_ap) { + /* Configure for power always on */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); + if (ret < 0) + return ret; + } else if (!wl->sta_count) { + /* Configure for ELP power saving */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + if (ret < 0) + return ret; + } + } + + /* Mode specific init */ + if (is_ap) { + ret = wl1271_ap_hw_init(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl12xx_init_ap_role(wl, wlvif); + if (ret < 0) + return ret; + } else { + ret = wl1271_sta_hw_init(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl12xx_init_sta_role(wl, wlvif); + if (ret < 0) + return ret; + } + + wl12xx_init_phy_vif_config(wl, wlvif); + + /* Default TID/AC configuration */ + BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); + for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { + conf_ac = &wl->conf.tx.ac_conf[i]; + ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac, + conf_ac->cw_min, conf_ac->cw_max, + conf_ac->aifsn, conf_ac->tx_op_limit); + if (ret < 0) + return ret; + + conf_tid = &wl->conf.tx.tid_conf[i]; + ret = wl1271_acx_tid_cfg(wl, wlvif, + conf_tid->queue_id, + conf_tid->channel_type, + conf_tid->tsid, + conf_tid->ps_scheme, + conf_tid->ack_policy, + conf_tid->apsd_conf[0], + conf_tid->apsd_conf[1]); + if (ret < 0) + return ret; + } + + /* Configure HW encryption */ + ret = wl1271_acx_feature_cfg(wl, wlvif); if (ret < 0) return ret; - if (wl->chip.id == CHIP_ID_1283_PG20) - ret = wl128x_cmd_radio_parms(wl); + /* Mode specific init - post mem init */ + if (is_ap) + ret = wl1271_ap_hw_init_post_mem(wl, vif); else - ret = wl1271_cmd_radio_parms(wl); + ret = wl1271_sta_hw_init_post_mem(wl, vif); + + if (ret < 0) + return ret; + + /* Configure initiator BA sessions policies */ + ret = wl1271_set_ba_policies(wl, wlvif); if (ret < 0) return ret; + return 0; +} + +int wl1271_hw_init(struct wl1271 *wl) +{ + int ret; + + if (wl->chip.id == CHIP_ID_1283_PG20) { + ret =
wl128x_cmd_general_parms(wl); + if (ret < 0) + return ret; + ret = wl128x_cmd_radio_parms(wl); + if (ret < 0) + return ret; + } else { + ret = wl1271_cmd_general_parms(wl); + if (ret < 0) + return ret; + ret = wl1271_cmd_radio_parms(wl); + if (ret < 0) + return ret; + ret = wl1271_cmd_ext_radio_parms(wl); + if (ret < 0) + return ret; + } + /* Chip-specific init */ ret = wl1271_chip_specific_init(wl); if (ret < 0) return ret; - /* Mode specific init */ - if (is_ap) - ret = wl1271_ap_hw_init(wl); - else - ret = wl1271_sta_hw_init(wl); + /* Init templates */ + ret = wl1271_init_templates_config(wl); + if (ret < 0) + return ret; + + ret = wl12xx_acx_mem_cfg(wl); + if (ret < 0) + return ret; + /* Configure the FW logger */ + ret = wl12xx_init_fwlog(wl); if (ret < 0) return ret; @@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; - /* PHY layer config */ - ret = wl1271_init_phy_config(wl); - if (ret < 0) - goto out_free_memmap; - ret = wl1271_acx_dco_itrim_params(wl); if (ret < 0) goto out_free_memmap; @@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; - /* Default TID/AC configuration */ - BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); - for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { - conf_ac = &wl->conf.tx.ac_conf[i]; - ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, - conf_ac->cw_max, conf_ac->aifsn, - conf_ac->tx_op_limit); - if (ret < 0) - goto out_free_memmap; - - conf_tid = &wl->conf.tx.tid_conf[i]; - ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, - conf_tid->channel_type, - conf_tid->tsid, - conf_tid->ps_scheme, - conf_tid->ack_policy, - conf_tid->apsd_conf[0], - conf_tid->apsd_conf[1]); - if (ret < 0) - goto out_free_memmap; - } - /* Enable data path */ ret = wl1271_cmd_data_path(wl, 1); if (ret < 0) goto out_free_memmap; - /* Configure HW encryption */ - ret = wl1271_acx_feature_cfg(wl); - if (ret < 0) - goto out_free_memmap; - /* configure PM */ ret = wl1271_acx_pm_config(wl); if (ret < 0) goto out_free_memmap; - /* Mode specific init - post mem init */ - if (is_ap) - ret = wl1271_ap_hw_init_post_mem(wl); - else - ret = wl1271_sta_hw_init_post_mem(wl); - - if (ret < 0) - goto out_free_memmap; - ret = wl12xx_acx_set_rate_mgmt_params(wl); if (ret < 0) goto out_free_memmap; - /* Configure initiator BA sessions policies */ - ret = wl1271_set_ba_policies(wl); - if (ret < 0) - goto out_free_memmap; - /* configure hangover */ ret = wl12xx_acx_config_hangover(wl); if (ret < 0) diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h index 3a3c230fd29..2da0f404ef6 100644 --- a/drivers/net/wireless/wl12xx/init.h +++ b/drivers/net/wireless/wl12xx/init.h @@ -27,13 +27,13 @@ #include "wl12xx.h" int wl1271_hw_init_power_auth(struct wl1271 *wl); -int wl1271_sta_init_templates_config(struct wl1271 *wl); -int wl1271_init_phy_config(struct wl1271 *wl); +int wl1271_init_templates_config(struct wl1271 *wl); int wl1271_init_pta(struct wl1271 *wl); int wl1271_init_energy_detection(struct wl1271 *wl); int wl1271_chip_specific_init(struct wl1271 *wl); int wl1271_hw_init(struct wl1271 *wl); -int wl1271_init_ap_rates(struct wl1271 *wl); -int wl1271_ap_init_templates(struct wl1271 *wl); +int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif); +int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif); #endif diff --git a/drivers/net/wireless/wl12xx/io.c 
b/drivers/net/wireless/wl12xx/io.c index c2da66f4504..079ad380e8f 100644 --- a/drivers/net/wireless/wl12xx/io.c +++ b/drivers/net/wireless/wl12xx/io.c @@ -24,8 +24,10 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> +#include <linux/interrupt.h> #include "wl12xx.h" +#include "debug.h" #include "wl12xx_80211.h" #include "io.h" #include "tx.h" @@ -46,7 +48,7 @@ bool wl1271_set_block_size(struct wl1271 *wl) { if (wl->if_ops->set_block_size) { - wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE); + wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE); return true; } @@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl) void wl1271_disable_interrupts(struct wl1271 *wl) { - wl->if_ops->disable_irq(wl); + disable_irq(wl->irq); } void wl1271_enable_interrupts(struct wl1271 *wl) { - wl->if_ops->enable_irq(wl); + enable_irq(wl->irq); } /* Set the SPI partitions to access the chip addresses @@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition); void wl1271_io_reset(struct wl1271 *wl) { if (wl->if_ops->reset) - wl->if_ops->reset(wl); + wl->if_ops->reset(wl->dev); } void wl1271_io_init(struct wl1271 *wl) { if (wl->if_ops->init) - wl->if_ops->init(wl); + wl->if_ops->init(wl->dev); } void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h index e839341dfaf..d398cbcea98 100644 --- a/drivers/net/wireless/wl12xx/io.h +++ b/drivers/net/wireless/wl12xx/io.h @@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl); void wl1271_io_reset(struct wl1271 *wl); void wl1271_io_init(struct wl1271 *wl); -static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl) -{ - return wl->if_ops->dev(wl); -} - - /* Raw target IO, address is not translated */ static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { - wl->if_ops->write(wl, addr, buf, len, fixed); + wl->if_ops->write(wl->dev, addr, buf, len, fixed); } static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { - wl->if_ops->read(wl, addr, buf, len, fixed); + wl->if_ops->read(wl->dev, addr, buf, len, fixed); } static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) @@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) static inline void wl1271_power_off(struct wl1271 *wl) { - wl->if_ops->power(wl, false); + wl->if_ops->power(wl->dev, false); clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); } static inline int wl1271_power_on(struct wl1271 *wl) { - int ret = wl->if_ops->power(wl, true); + int ret = wl->if_ops->power(wl->dev, true); if (ret == 0) set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); @@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr); int wl1271_set_partition(struct wl1271 *wl, struct wl1271_partition_set *p); +bool wl1271_set_block_size(struct wl1271 *wl); + /* Functions from wl1271_main.c */ -int wl1271_register_hw(struct wl1271 *wl); -void wl1271_unregister_hw(struct wl1271 *wl); -int wl1271_init_ieee80211(struct wl1271 *wl); -struct ieee80211_hw *wl1271_alloc_hw(void); -int wl1271_free_hw(struct wl1271 *wl); -irqreturn_t wl1271_irq(int irq, void *data); -bool wl1271_set_block_size(struct wl1271 *wl); int wl1271_tx_dummy_packet(struct wl1271 *wl); #endif diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c index 884f82b6321..d5f55a149de 100644 --- a/drivers/net/wireless/wl12xx/main.c +++ 
b/drivers/net/wireless/wl12xx/main.c @@ -32,8 +32,10 @@ #include <linux/slab.h> #include <linux/wl12xx.h> #include <linux/sched.h> +#include <linux/interrupt.h> #include "wl12xx.h" +#include "debug.h" #include "wl12xx_80211.h" #include "reg.h" #include "io.h" @@ -377,42 +379,30 @@ static char *fwlog_param; static bool bug_on_recovery; static void __wl1271_op_remove_interface(struct wl1271 *wl, + struct ieee80211_vif *vif, bool reset_tx_queues); -static void wl1271_free_ap_keys(struct wl1271 *wl); - - -static void wl1271_device_release(struct device *dev) -{ - -} - -static struct platform_device wl1271_device = { - .name = "wl1271", - .id = -1, - - /* device model insists to have a release function */ - .dev = { - .release = wl1271_device_release, - }, -}; +static void wl1271_op_stop(struct ieee80211_hw *hw); +static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); static DEFINE_MUTEX(wl_list_mutex); static LIST_HEAD(wl_list); -static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate) +static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif, + unsigned char operstate) { int ret; + if (operstate != IF_OPER_UP) return 0; - if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) + if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags)) return 0; - ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid); + ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid); if (ret < 0) return ret; - wl12xx_croc(wl, wl->role_id); + wl12xx_croc(wl, wlvif->role_id); wl1271_info("Association completed."); return 0; @@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, struct ieee80211_hw *hw; struct wl1271 *wl; struct wl1271 *wl_temp; + struct wl12xx_vif *wlvif; int ret = 0; /* Check that this notification is for us. */ @@ -459,17 +450,28 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, if (wl->state == WL1271_STATE_OFF) goto out; - if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + if (dev->operstate != IF_OPER_UP) goto out; + /* + * The correct behavior should be just getting the appropriate wlvif + * from the given dev, but currently we don't have a mac80211 + * interface for it. 
+ */ + wl12xx_for_each_wlvif_sta(wl, wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + continue; - wl1271_check_operstate(wl, dev->operstate); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; - wl1271_ps_elp_sleep(wl); + wl1271_check_operstate(wl, wlvif, + ieee80211_get_operstate(vif)); + wl1271_ps_elp_sleep(wl); + } out: mutex_unlock(&wl->mutex); @@ -498,19 +500,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy, return 0; } -static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable) +static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) { int ret = 0; /* we should hold wl->mutex */ - ret = wl1271_acx_ps_rx_streaming(wl, enable); + ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable); if (ret < 0) goto out; if (enable) - set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); + set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); else - clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); + clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); out: return ret; } @@ -519,25 +522,25 @@ out: * this function is being called when the rx_streaming interval * has been changed or rx_streaming should be disabled */ -int wl1271_recalc_rx_streaming(struct wl1271 *wl) +int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; int period = wl->conf.rx_streaming.interval; /* don't reconfigure if rx_streaming is disabled */ - if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; /* reconfigure/disable according to new streaming_period */ if (period && - test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) && + test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && (wl->conf.rx_streaming.always || test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) - ret = wl1271_set_rx_streaming(wl, true); + ret = wl1271_set_rx_streaming(wl, wlvif, true); else { - ret = wl1271_set_rx_streaming(wl, false); + ret = wl1271_set_rx_streaming(wl, wlvif, false); /* don't cancel_work_sync since we might deadlock */ - del_timer_sync(&wl->rx_streaming_timer); + del_timer_sync(&wlvif->rx_streaming_timer); } out: return ret; @@ -546,13 +549,14 @@ out: static void wl1271_rx_streaming_enable_work(struct work_struct *work) { int ret; - struct wl1271 *wl = - container_of(work, struct wl1271, rx_streaming_enable_work); + struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, + rx_streaming_enable_work); + struct wl1271 *wl = wlvif->wl; mutex_lock(&wl->mutex); - if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) || - !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || + if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) || + !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || (!wl->conf.rx_streaming.always && !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) goto out; @@ -564,12 +568,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) if (ret < 0) goto out; - ret = wl1271_set_rx_streaming(wl, true); + ret = wl1271_set_rx_streaming(wl, wlvif, true); if (ret < 0) goto out_sleep; /* stop it after some time of inactivity */ - mod_timer(&wl->rx_streaming_timer, + mod_timer(&wlvif->rx_streaming_timer, jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: @@ -581,19 +585,20 @@ out: static void wl1271_rx_streaming_disable_work(struct work_struct *work) { int
ret; - struct wl1271 *wl = - container_of(work, struct wl1271, rx_streaming_disable_work); + struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, + rx_streaming_disable_work); + struct wl1271 *wl = wlvif->wl; mutex_lock(&wl->mutex); - if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - ret = wl1271_set_rx_streaming(wl, false); + ret = wl1271_set_rx_streaming(wl, wlvif, false); if (ret) goto out_sleep; @@ -605,8 +610,9 @@ out: static void wl1271_rx_streaming_timer(unsigned long data) { - struct wl1271 *wl = (struct wl1271 *)data; - ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work); + struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data; + struct wl1271 *wl = wlvif->wl; + ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work); } static void wl1271_conf_init(struct wl1271 *wl) @@ -645,9 +651,7 @@ static void wl1271_conf_init(struct wl1271 *wl) static int wl1271_plt_init(struct wl1271 *wl) { - struct conf_tx_ac_category *conf_ac; - struct conf_tx_tid *conf_tid; - int ret, i; + int ret; if (wl->chip.id == CHIP_ID_1283_PG20) ret = wl128x_cmd_general_parms(wl); @@ -676,74 +680,14 @@ static int wl1271_plt_init(struct wl1271 *wl) if (ret < 0) return ret; - ret = wl1271_sta_init_templates_config(wl); - if (ret < 0) - return ret; - ret = wl1271_acx_init_mem_config(wl); if (ret < 0) return ret; - /* PHY layer config */ - ret = wl1271_init_phy_config(wl); - if (ret < 0) - goto out_free_memmap; - - ret = wl1271_acx_dco_itrim_params(wl); - if (ret < 0) - goto out_free_memmap; - - /* Initialize connection monitoring thresholds */ - ret = wl1271_acx_conn_monit_params(wl, false); - if (ret < 0) - goto out_free_memmap; - - /* Bluetooth WLAN coexistence */ - ret = wl1271_init_pta(wl); - if (ret < 0) - goto out_free_memmap; - - /* FM WLAN coexistence */ - ret = wl1271_acx_fm_coex(wl); - if (ret < 0) - goto out_free_memmap; - - /* Energy detection */ - ret = wl1271_init_energy_detection(wl); - if (ret < 0) - goto out_free_memmap; - ret = wl12xx_acx_mem_cfg(wl); if (ret < 0) goto out_free_memmap; - /* Default fragmentation threshold */ - ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); - if (ret < 0) - goto out_free_memmap; - - /* Default TID/AC configuration */ - BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); - for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { - conf_ac = &wl->conf.tx.ac_conf[i]; - ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, - conf_ac->cw_max, conf_ac->aifsn, - conf_ac->tx_op_limit); - if (ret < 0) - goto out_free_memmap; - - conf_tid = &wl->conf.tx.tid_conf[i]; - ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, - conf_tid->channel_type, - conf_tid->tsid, - conf_tid->ps_scheme, - conf_tid->ack_policy, - conf_tid->apsd_conf[0], - conf_tid->apsd_conf[1]); - if (ret < 0) - goto out_free_memmap; - } - /* Enable data path */ ret = wl1271_cmd_data_path(wl, 1); if (ret < 0) @@ -768,14 +712,12 @@ static int wl1271_plt_init(struct wl1271 *wl) return ret; } -static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts) +static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 hlid, u8 tx_pkts) { bool fw_ps, single_sta; - /* only regulate station links */ - if (hlid < WL1271_AP_STA_HLID_START) - return; - fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); single_sta = (wl->active_sta_count == 1); @@ -784,7 +726,7 @@ static 
void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts) * packets in FW or if the STA is awake. */ if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) - wl1271_ps_link_end(wl, hlid); + wl12xx_ps_link_end(wl, wlvif, hlid); /* * Start high-level PS if the STA is asleep with enough blocks in FW. @@ -792,24 +734,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts) * case FW-memory congestion is not a problem. */ else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) - wl1271_ps_link_start(wl, hlid, true); -} - -bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid) -{ - int id; - - /* global/broadcast "stations" are always active */ - if (hlid < WL1271_AP_STA_HLID_START) - return true; - - id = hlid - WL1271_AP_STA_HLID_START; - return test_bit(id, wl->ap_hlid_map); + wl12xx_ps_link_start(wl, wlvif, hlid, true); } static void wl12xx_irq_update_links_status(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct wl12xx_fw_status *status) { + struct wl1271_link *lnk; u32 cur_fw_ps_map; u8 hlid, cnt; @@ -825,25 +757,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, wl->ap_fw_ps_map = cur_fw_ps_map; } - for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) { - if (!wl1271_is_active_sta(wl, hlid)) - continue; - - cnt = status->tx_lnk_free_pkts[hlid] - - wl->links[hlid].prev_freed_pkts; + for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { + lnk = &wl->links[hlid]; + cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts; - wl->links[hlid].prev_freed_pkts = - status->tx_lnk_free_pkts[hlid]; - wl->links[hlid].allocated_pkts -= cnt; + lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid]; + lnk->allocated_pkts -= cnt; - wl12xx_irq_ps_regulate_link(wl, hlid, - wl->links[hlid].allocated_pkts); + wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, + lnk->allocated_pkts); } } static void wl12xx_fw_status(struct wl1271 *wl, struct wl12xx_fw_status *status) { + struct wl12xx_vif *wlvif; struct timespec ts; u32 old_tx_blk_count = wl->tx_blocks_available; int avail, freed_blocks; @@ -898,8 +827,9 @@ static void wl12xx_fw_status(struct wl1271 *wl, clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); /* for AP update num of allocated TX blocks per link and ps status */ - if (wl->bss_type == BSS_TYPE_AP_BSS) - wl12xx_irq_update_links_status(wl, status); + wl12xx_for_each_wlvif_ap(wl, wlvif) { + wl12xx_irq_update_links_status(wl, wlvif, status); + } /* update the host-chipset time offset */ getnstimeofday(&ts); @@ -932,7 +862,7 @@ static void wl1271_netstack_work(struct work_struct *work) #define WL1271_IRQ_MAX_LOOPS 256 -irqreturn_t wl1271_irq(int irq, void *cookie) +static irqreturn_t wl1271_irq(int irq, void *cookie) { int ret; u32 intr; @@ -1054,7 +984,6 @@ out: return IRQ_HANDLED; } -EXPORT_SYMBOL_GPL(wl1271_irq); static int wl1271_fetch_firmware(struct wl1271 *wl) { @@ -1069,10 +998,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl) wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); - ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl)); + ret = request_firmware(&fw, fw_name, wl->dev); if (ret < 0) { - wl1271_error("could not get firmware: %d", ret); + wl1271_error("could not get firmware %s: %d", fw_name, ret); return ret; } @@ -1107,10 +1036,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl) const struct firmware *fw; int ret; - ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl)); + ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev); if (ret < 0) { - 
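[Editorial sketch] The PS-regulation loop above no longer scans every possible hlid; it walks only the bits set in the per-vif sta_hlid_map via for_each_set_bit(). A self-contained userspace approximation of that bitmap walk (sizes and values are illustrative):

#include <stdio.h>

#define MAX_LINKS 32

int main(void)
{
    unsigned long sta_hlid_map = 0;
    int fw_freed_pkts[MAX_LINKS] = { [3] = 7, [5] = 2 };
    int hlid;

    sta_hlid_map |= 1ul << 3;   /* pretend links 3 and 5 host stations */
    sta_hlid_map |= 1ul << 5;

    /* only set bits are visited, like for_each_set_bit() in the patch */
    for (hlid = 0; hlid < MAX_LINKS; hlid++) {
        if (!(sta_hlid_map & (1ul << hlid)))
            continue;
        printf("hlid %d: %d packets freed by firmware\n",
               hlid, fw_freed_pkts[hlid]);
    }
    return 0;
}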
wl1271_error("could not get nvs file: %d", ret); + wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME, + ret); return ret; } @@ -1217,11 +1147,13 @@ static void wl1271_recovery_work(struct work_struct *work) { struct wl1271 *wl = container_of(work, struct wl1271, recovery_work); + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; mutex_lock(&wl->mutex); if (wl->state != WL1271_STATE_ON) - goto out; + goto out_unlock; /* Avoid a recursive recovery */ set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); @@ -1238,9 +1170,12 @@ static void wl1271_recovery_work(struct work_struct *work) * in the firmware during recovery. This doens't hurt if the network is * not encrypted. */ - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || - test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) - wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING; + wl12xx_for_each_wlvif(wl, wlvif) { + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || + test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) + wlvif->tx_security_seq += + WL1271_TX_SQN_POST_RECOVERY_PADDING; + } /* Prevent spurious TX during FW restart */ ieee80211_stop_queues(wl->hw); @@ -1251,7 +1186,14 @@ static void wl1271_recovery_work(struct work_struct *work) } /* reboot the chipset */ - __wl1271_op_remove_interface(wl, false); + while (!list_empty(&wl->wlvif_list)) { + wlvif = list_first_entry(&wl->wlvif_list, + struct wl12xx_vif, list); + vif = wl12xx_wlvif_to_vif(wlvif); + __wl1271_op_remove_interface(wl, vif, false); + } + mutex_unlock(&wl->mutex); + wl1271_op_stop(wl->hw); clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); @@ -1262,8 +1204,8 @@ static void wl1271_recovery_work(struct work_struct *work) * to restart the HW. */ ieee80211_wake_queues(wl->hw); - -out: + return; +out_unlock: mutex_unlock(&wl->mutex); } @@ -1318,7 +1260,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) /* 0. read chip id from CHIP_ID */ wl->chip.id = wl1271_read32(wl, CHIP_ID_B); - /* 1. check if chip id is valid */ + /* + * For wl127x based devices we could use the default block + * size (512 bytes), but due to a bug in the sdio driver, we + * need to set it explicitly after the chip is powered on. To + * simplify the code and since the performance impact is + * negligible, we use the same block size for all different + * chip types. 
+ */ + if (!wl1271_set_block_size(wl)) + wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; switch (wl->chip.id) { case CHIP_ID_1271_PG10: @@ -1328,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) ret = wl1271_setup(wl); if (ret < 0) goto out; + wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; break; + case CHIP_ID_1271_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", wl->chip.id); @@ -1336,7 +1289,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) ret = wl1271_setup(wl); if (ret < 0) goto out; + wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; break; + case CHIP_ID_1283_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", wl->chip.id); @@ -1344,9 +1299,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) ret = wl1271_setup(wl); if (ret < 0) goto out; - - if (wl1271_set_block_size(wl)) - wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT; break; case CHIP_ID_1283_PG10: default: @@ -1389,8 +1341,6 @@ int wl1271_plt_start(struct wl1271 *wl) goto out; } - wl->bss_type = BSS_TYPE_STA_BSS; - while (retries) { retries--; ret = wl1271_chip_wakeup(wl); @@ -1482,33 +1432,34 @@ int wl1271_plt_stop(struct wl1271 *wl) static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct wl1271 *wl = hw->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct wl12xx_vif *wlvif = NULL; unsigned long flags; int q, mapping; - u8 hlid = 0; + u8 hlid; + + if (vif) + wlvif = wl12xx_vif_to_data(vif); mapping = skb_get_queue_mapping(skb); q = wl1271_tx_get_queue(mapping); - if (wl->bss_type == BSS_TYPE_AP_BSS) - hlid = wl12xx_tx_get_hlid_ap(wl, skb); + hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); spin_lock_irqsave(&wl->wl_lock, flags); /* queue the packet */ - if (wl->bss_type == BSS_TYPE_AP_BSS) { - if (!wl1271_is_active_sta(wl, hlid)) { - wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", - hlid, q); - dev_kfree_skb(skb); - goto out; - } - - wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q); - skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); - } else { - skb_queue_tail(&wl->tx_queue[q], skb); + if (hlid == WL12XX_INVALID_LINK_ID || + (wlvif && !test_bit(hlid, wlvif->links_map))) { + wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); + ieee80211_free_txskb(hw, skb); + goto out; } + wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q); + skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); + wl->tx_queue_count[q]++; /* @@ -1609,13 +1560,14 @@ static struct notifier_block wl1271_dev_notifier = { }; #ifdef CONFIG_PM -static int wl1271_configure_suspend_sta(struct wl1271 *wl) +static int wl1271_configure_suspend_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int ret = 0; mutex_lock(&wl->mutex); - if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) goto out_unlock; ret = wl1271_ps_elp_wakeup(wl); @@ -1623,12 +1575,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl) goto out_unlock; /* enter psm if needed*/ - if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { + if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { DECLARE_COMPLETION_ONSTACK(compl); - wl->ps_compl = &compl; - ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, - wl->basic_rate, true); + wlvif->ps_compl = &compl; + ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE, + wlvif->basic_rate, true); if (ret < 0) goto out_sleep; @@ -1638,42 +1590,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl) ret = wait_for_completion_timeout( &compl, 
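[Editorial sketch] Per the comment above, chip wakeup now tries to configure the bus block size once for every chip type and records a quirk flag when the bus cannot do it. A small sketch of that probe-then-quirk pattern; the helper and its return convention are stand-ins, only the flag name mirrors the patch:

#include <stdio.h>
#include <stdbool.h>

#define QUIRK_NO_BLOCKSIZE_ALIGNMENT 0x1

/* stand-in for wl1271_set_block_size(): false means the bus refused */
static bool set_block_size(bool bus_supports_blocks)
{
    return bus_supports_blocks;
}

int main(void)
{
    unsigned long quirks = 0;

    if (!set_block_size(false))
        quirks |= QUIRK_NO_BLOCKSIZE_ALIGNMENT;

    printf("blocksize alignment %s\n",
           (quirks & QUIRK_NO_BLOCKSIZE_ALIGNMENT) ? "disabled (quirk)"
                                                   : "enabled");
    return 0;
}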
msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT)); + + mutex_lock(&wl->mutex); if (ret <= 0) { wl1271_warning("couldn't enter ps mode!"); ret = -EBUSY; - goto out; + goto out_cleanup; } - /* take mutex again, and wakeup */ - mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) - goto out_unlock; + goto out_cleanup; } out_sleep: wl1271_ps_elp_sleep(wl); +out_cleanup: + wlvif->ps_compl = NULL; out_unlock: mutex_unlock(&wl->mutex); -out: return ret; } -static int wl1271_configure_suspend_ap(struct wl1271 *wl) +static int wl1271_configure_suspend_ap(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int ret = 0; mutex_lock(&wl->mutex); - if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) goto out_unlock; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; - ret = wl1271_acx_beacon_filter_opt(wl, true); + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); wl1271_ps_elp_sleep(wl); out_unlock: @@ -1682,20 +1635,22 @@ out_unlock: } -static int wl1271_configure_suspend(struct wl1271 *wl) +static int wl1271_configure_suspend(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { - if (wl->bss_type == BSS_TYPE_STA_BSS) - return wl1271_configure_suspend_sta(wl); - if (wl->bss_type == BSS_TYPE_AP_BSS) - return wl1271_configure_suspend_ap(wl); + if (wlvif->bss_type == BSS_TYPE_STA_BSS) + return wl1271_configure_suspend_sta(wl, wlvif); + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + return wl1271_configure_suspend_ap(wl, wlvif); return 0; } -static void wl1271_configure_resume(struct wl1271 *wl) +static void wl1271_configure_resume(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { int ret; - bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS; - bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS; + bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; + bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; if (!is_sta && !is_ap) return; @@ -1707,11 +1662,11 @@ static void wl1271_configure_resume(struct wl1271 *wl) if (is_sta) { /* exit psm if it wasn't configured */ - if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) - wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, - wl->basic_rate, true); + if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) + wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE, + wlvif->basic_rate, true); } else if (is_ap) { - wl1271_acx_beacon_filter_opt(wl, false); + wl1271_acx_beacon_filter_opt(wl, wlvif, false); } wl1271_ps_elp_sleep(wl); @@ -1723,16 +1678,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); WARN_ON(!wow || !wow->any); wl->wow_enabled = true; - ret = wl1271_configure_suspend(wl); - if (ret < 0) { - wl1271_warning("couldn't prepare device to suspend"); - return ret; + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl1271_configure_suspend(wl, wlvif); + if (ret < 0) { + wl1271_warning("couldn't prepare device to suspend"); + return ret; + } } /* flush any remaining work */ wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); @@ -1751,7 +1709,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, wl1271_enable_interrupts(wl); flush_work(&wl->tx_work); - flush_delayed_work(&wl->pspoll_work); + wl12xx_for_each_wlvif(wl, wlvif) { + flush_delayed_work(&wlvif->pspoll_work); + } flush_delayed_work(&wl->elp_work); return 0; @@ -1760,6 +1720,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, static int wl1271_op_resume(struct ieee80211_hw *hw) { 
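[Editorial sketch] The suspend hunk above reshuffles the error labels so that out_cleanup always clears the published completion pointer, whether the timed wait succeeded, timed out, or the later wakeup failed. A compilable sketch of that goto-ladder convention; the step names are illustrative, not the driver's calls:

#include <stdio.h>

static int do_step(const char *name, int fail)
{
    printf("%s: %s\n", name, fail ? "failed" : "ok");
    return fail ? -1 : 0;
}

static int configure_suspend(int fail_at_wait)
{
    int ret;

    ret = do_step("elp wakeup", 0);
    if (ret < 0)
        goto out_unlock;

    ret = do_step("request PS mode", 0);
    if (ret < 0)
        goto out_sleep;

    ret = do_step("wait for PS completion", fail_at_wait);
    if (ret < 0)
        goto out_cleanup;        /* skip the sleep step, still clean up */

out_sleep:
    do_step("elp sleep", 0);
out_cleanup:
    do_step("clear completion pointer", 0);   /* wlvif->ps_compl = NULL */
out_unlock:
    return ret;
}

int main(void)
{
    configure_suspend(1);
    return 0;
}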
struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; unsigned long flags; bool run_irq_work = false; @@ -1783,7 +1744,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw) wl1271_irq(0, wl); wl1271_enable_interrupts(wl); } - wl1271_configure_resume(wl); + wl12xx_for_each_wlvif(wl, wlvif) { + wl1271_configure_resume(wl, wlvif); + } wl->wow_enabled = false; return 0; @@ -1810,20 +1773,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw) static void wl1271_op_stop(struct ieee80211_hw *hw) { + struct wl1271 *wl = hw->priv; + int i; + wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); + + mutex_lock(&wl->mutex); + if (wl->state == WL1271_STATE_OFF) { + mutex_unlock(&wl->mutex); + return; + } + /* + * this must be before the cancel_work calls below, so that the work + * functions don't perform further work. + */ + wl->state = WL1271_STATE_OFF; + mutex_unlock(&wl->mutex); + + mutex_lock(&wl_list_mutex); + list_del(&wl->list); + mutex_unlock(&wl_list_mutex); + + wl1271_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); + cancel_delayed_work_sync(&wl->scan_complete_work); + cancel_work_sync(&wl->netstack_work); + cancel_work_sync(&wl->tx_work); + cancel_delayed_work_sync(&wl->elp_work); + + /* let's notify MAC80211 about the remaining pending TX frames */ + wl12xx_tx_reset(wl, true); + mutex_lock(&wl->mutex); + + wl1271_power_off(wl); + + wl->band = IEEE80211_BAND_2GHZ; + + wl->rx_counter = 0; + wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->tx_blocks_available = 0; + wl->tx_allocated_blocks = 0; + wl->tx_results_count = 0; + wl->tx_packets_count = 0; + wl->time_offset = 0; + wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; + wl->ap_fw_ps_map = 0; + wl->ap_ps_map = 0; + wl->sched_scanning = false; + memset(wl->roles_map, 0, sizeof(wl->roles_map)); + memset(wl->links_map, 0, sizeof(wl->links_map)); + memset(wl->roc_map, 0, sizeof(wl->roc_map)); + wl->active_sta_count = 0; + + /* The system link is always allocated */ + __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); + + /* + * this is performed after the cancel_work calls and the associated + * mutex_lock, so that wl1271_op_add_interface does not accidentally + * get executed before all these vars have been reset. 
+ */ + wl->flags = 0; + + wl->tx_blocks_freed = 0; + + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->tx_pkts_freed[i] = 0; + wl->tx_allocated_pkts[i] = 0; + } + + wl1271_debugfs_reset(wl); + + kfree(wl->fw_status); + wl->fw_status = NULL; + kfree(wl->tx_res_if); + wl->tx_res_if = NULL; + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + + mutex_unlock(&wl->mutex); } -static u8 wl12xx_get_role_type(struct wl1271 *wl) +static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) { - switch (wl->bss_type) { + u8 policy = find_first_zero_bit(wl->rate_policies_map, + WL12XX_MAX_RATE_POLICIES); + if (policy >= WL12XX_MAX_RATE_POLICIES) + return -EBUSY; + + __set_bit(policy, wl->rate_policies_map); + *idx = policy; + return 0; +} + +static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx) +{ + if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES)) + return; + + __clear_bit(*idx, wl->rate_policies_map); + *idx = WL12XX_MAX_RATE_POLICIES; +} + +static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + switch (wlvif->bss_type) { case BSS_TYPE_AP_BSS: - if (wl->p2p) + if (wlvif->p2p) return WL1271_ROLE_P2P_GO; else return WL1271_ROLE_AP; case BSS_TYPE_STA_BSS: - if (wl->p2p) + if (wlvif->p2p) return WL1271_ROLE_P2P_CL; else return WL1271_ROLE_STA; @@ -1832,78 +1894,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl) return WL1271_ROLE_IBSS; default: - wl1271_error("invalid bss_type: %d", wl->bss_type); + wl1271_error("invalid bss_type: %d", wlvif->bss_type); } return WL12XX_INVALID_ROLE_TYPE; } -static int wl1271_op_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) { - struct wl1271 *wl = hw->priv; - struct wiphy *wiphy = hw->wiphy; - int retries = WL1271_BOOT_RETRIES; - int ret = 0; - u8 role_type; - bool booted = false; - - wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", - ieee80211_vif_type_p2p(vif), vif->addr); - - mutex_lock(&wl->mutex); - if (wl->vif) { - wl1271_debug(DEBUG_MAC80211, - "multiple vifs are not supported yet"); - ret = -EBUSY; - goto out; - } + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int i; - /* - * in some very corner case HW recovery scenarios its possible to - * get here before __wl1271_op_remove_interface is complete, so - * opt out if that is the case. 
- */ - if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) { - ret = -EBUSY; - goto out; - } + /* clear everything but the persistent data */ + memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)); switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_CLIENT: - wl->p2p = 1; + wlvif->p2p = 1; /* fall-through */ case NL80211_IFTYPE_STATION: - wl->bss_type = BSS_TYPE_STA_BSS; - wl->set_bss_type = BSS_TYPE_STA_BSS; + wlvif->bss_type = BSS_TYPE_STA_BSS; break; case NL80211_IFTYPE_ADHOC: - wl->bss_type = BSS_TYPE_IBSS; - wl->set_bss_type = BSS_TYPE_STA_BSS; + wlvif->bss_type = BSS_TYPE_IBSS; break; case NL80211_IFTYPE_P2P_GO: - wl->p2p = 1; + wlvif->p2p = 1; /* fall-through */ case NL80211_IFTYPE_AP: - wl->bss_type = BSS_TYPE_AP_BSS; + wlvif->bss_type = BSS_TYPE_AP_BSS; break; default: - ret = -EOPNOTSUPP; - goto out; + wlvif->bss_type = MAX_BSS_TYPE; + return -EOPNOTSUPP; } - role_type = wl12xx_get_role_type(wl); - if (role_type == WL12XX_INVALID_ROLE_TYPE) { - ret = -EINVAL; - goto out; + wlvif->role_id = WL12XX_INVALID_ROLE_ID; + wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; + wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + /* init sta/ibss data */ + wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; + wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); + wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); + wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); + } else { + /* init ap data */ + wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; + wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; + wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); + wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx); + for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) + wl12xx_allocate_rate_policy(wl, + &wlvif->ap.ucast_rate_idx[i]); } - memcpy(wl->mac_addr, vif->addr, ETH_ALEN); - if (wl->state != WL1271_STATE_OFF) { - wl1271_error("cannot start because not in off state: %d", - wl->state); - ret = -EBUSY; - goto out; - } + wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; + wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; + wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; + wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; + wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; + wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; + + /* + * mac80211 configures some values globally, while we treat them + * per-interface. 
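[Editorial sketch] wl12xx_init_vif_data() above clears the interface state with memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)), i.e. everything up to a trailing "persistent" member survives re-initialisation. That only works because the surviving fields are grouped at the end of the structure. A sketch of the idiom with an illustrative layout (the real wl12xx_vif layout is not shown in this hunk):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct demo_vif {
    int role_id;                          /* reset on every add_interface */
    int bss_type;
    struct {
        unsigned long long tx_security_seq;   /* must survive a re-init */
    } persistent;
};

int main(void)
{
    struct demo_vif vif = {
        .role_id = 7,
        .bss_type = 2,
        .persistent = { .tx_security_seq = 1234 },
    };

    /* clear everything but the persistent tail */
    memset(&vif, 0, offsetof(struct demo_vif, persistent));

    printf("role_id=%d bss_type=%d seq=%llu\n",
           vif.role_id, vif.bss_type, vif.persistent.tx_security_seq);
    return 0;
}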
thus, on init, we have to copy them from wl + */ + wlvif->band = wl->band; + wlvif->channel = wl->channel; + wlvif->power_level = wl->power_level; + + INIT_WORK(&wlvif->rx_streaming_enable_work, + wl1271_rx_streaming_enable_work); + INIT_WORK(&wlvif->rx_streaming_disable_work, + wl1271_rx_streaming_disable_work); + INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work); + INIT_LIST_HEAD(&wlvif->list); + + setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, + (unsigned long) wlvif); + return 0; +} + +static bool wl12xx_init_fw(struct wl1271 *wl) +{ + int retries = WL1271_BOOT_RETRIES; + bool booted = false; + struct wiphy *wiphy = wl->hw->wiphy; + int ret; while (retries) { retries--; @@ -1915,25 +1994,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, if (ret < 0) goto power_off; - if (wl->bss_type == BSS_TYPE_STA_BSS || - wl->bss_type == BSS_TYPE_IBSS) { - /* - * The device role is a special role used for - * rx and tx frames prior to association (as - * the STA role can get packets only from - * its associated bssid) - */ - ret = wl12xx_cmd_role_enable(wl, - WL1271_ROLE_DEVICE, - &wl->dev_role_id); - if (ret < 0) - goto irq_disable; - } - - ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id); - if (ret < 0) - goto irq_disable; - ret = wl1271_hw_init(wl); if (ret < 0) goto irq_disable; @@ -1964,9 +2024,6 @@ power_off: goto out; } - wl->vif = vif; - wl->state = WL1271_STATE_ON; - set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags); wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); /* update hw/fw version info in wiphy struct */ @@ -1984,7 +2041,115 @@ power_off: wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", wl->enable_11a ? "" : "not "); + wl->state = WL1271_STATE_ON; +out: + return booted; +} + +static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) +{ + return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID; +} + +static int wl1271_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret = 0; + u8 role_type; + bool booted = false; + + wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", + ieee80211_vif_type_p2p(vif), vif->addr); + + mutex_lock(&wl->mutex); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out_unlock; + + if (wl->vif) { + wl1271_debug(DEBUG_MAC80211, + "multiple vifs are not supported yet"); + ret = -EBUSY; + goto out; + } + + /* + * in some very corner case HW recovery scenarios its possible to + * get here before __wl1271_op_remove_interface is complete, so + * opt out if that is the case. + */ + if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || + test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { + ret = -EBUSY; + goto out; + } + + ret = wl12xx_init_vif_data(wl, vif); + if (ret < 0) + goto out; + + wlvif->wl = wl; + role_type = wl12xx_get_role_type(wl, wlvif); + if (role_type == WL12XX_INVALID_ROLE_TYPE) { + ret = -EINVAL; + goto out; + } + + /* + * TODO: after the nvs issue will be solved, move this block + * to start(), and make sure here the driver is ON. 
+ */ + if (wl->state == WL1271_STATE_OFF) { + /* + * we still need this in order to configure the fw + * while uploading the nvs + */ + memcpy(wl->mac_addr, vif->addr, ETH_ALEN); + + booted = wl12xx_init_fw(wl); + if (!booted) { + ret = -EINVAL; + goto out; + } + } + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + /* + * The device role is a special role used for + * rx and tx frames prior to association (as + * the STA role can get packets only from + * its associated bssid) + */ + ret = wl12xx_cmd_role_enable(wl, vif->addr, + WL1271_ROLE_DEVICE, + &wlvif->dev_role_id); + if (ret < 0) + goto out; + } + + ret = wl12xx_cmd_role_enable(wl, vif->addr, + role_type, &wlvif->role_id); + if (ret < 0) + goto out; + + ret = wl1271_init_vif_specific(wl, vif); + if (ret < 0) + goto out; + + wl->vif = vif; + list_add(&wlvif->list, &wl->wlvif_list); + set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); + + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + wl->ap_count++; + else + wl->sta_count++; out: + wl1271_ps_elp_sleep(wl); +out_unlock: mutex_unlock(&wl->mutex); mutex_lock(&wl_list_mutex); @@ -1996,29 +2161,34 @@ out: } static void __wl1271_op_remove_interface(struct wl1271 *wl, + struct ieee80211_vif *vif, bool reset_tx_queues) { - int ret, i; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int i, ret; wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); + if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + return; + + wl->vif = NULL; + /* because of hardware recovery, we may get here twice */ if (wl->state != WL1271_STATE_ON) return; wl1271_info("down"); - mutex_lock(&wl_list_mutex); - list_del(&wl->list); - mutex_unlock(&wl_list_mutex); - /* enable dyn ps just in case (if left on due to fw crash etc) */ - if (wl->bss_type == BSS_TYPE_STA_BSS) - ieee80211_enable_dyn_ps(wl->vif); + if (wlvif->bss_type == BSS_TYPE_STA_BSS) + ieee80211_enable_dyn_ps(vif); - if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { + if (wl->scan.state != WL1271_SCAN_STATE_IDLE && + wl->scan_vif == vif) { wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + wl->scan_vif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); } @@ -2029,13 +2199,17 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (ret < 0) goto deinit; - if (wl->bss_type == BSS_TYPE_STA_BSS) { - ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id); + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + if (wl12xx_dev_role_started(wlvif)) + wl12xx_stop_dev(wl, wlvif); + + ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); if (ret < 0) goto deinit; } - ret = wl12xx_cmd_role_disable(wl, &wl->role_id); + ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); if (ret < 0) goto deinit; @@ -2043,120 +2217,93 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, } deinit: /* clear all hlids (except system_hlid) */ - wl->sta_hlid = WL12XX_INVALID_LINK_ID; - wl->dev_hlid = WL12XX_INVALID_LINK_ID; - wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID; - wl->ap_global_hlid = WL12XX_INVALID_LINK_ID; + wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; + wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); + wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); + wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); + } else { + wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; + 
wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; + wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); + wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx); + for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) + wl12xx_free_rate_policy(wl, + &wlvif->ap.ucast_rate_idx[i]); + } - /* - * this must be before the cancel_work calls below, so that the work - * functions don't perform further work. - */ - wl->state = WL1271_STATE_OFF; + wl12xx_tx_reset_wlvif(wl, wlvif); + wl1271_free_ap_keys(wl, wlvif); + if (wl->last_wlvif == wlvif) + wl->last_wlvif = NULL; + list_del(&wlvif->list); + memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map)); + wlvif->role_id = WL12XX_INVALID_ROLE_ID; + wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; + + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + wl->ap_count--; + else + wl->sta_count--; mutex_unlock(&wl->mutex); - - wl1271_disable_interrupts(wl); - wl1271_flush_deferred_work(wl); - cancel_delayed_work_sync(&wl->scan_complete_work); - cancel_work_sync(&wl->netstack_work); - cancel_work_sync(&wl->tx_work); - del_timer_sync(&wl->rx_streaming_timer); - cancel_work_sync(&wl->rx_streaming_enable_work); - cancel_work_sync(&wl->rx_streaming_disable_work); - cancel_delayed_work_sync(&wl->pspoll_work); - cancel_delayed_work_sync(&wl->elp_work); + del_timer_sync(&wlvif->rx_streaming_timer); + cancel_work_sync(&wlvif->rx_streaming_enable_work); + cancel_work_sync(&wlvif->rx_streaming_disable_work); + cancel_delayed_work_sync(&wlvif->pspoll_work); mutex_lock(&wl->mutex); - - /* let's notify MAC80211 about the remaining pending TX frames */ - wl1271_tx_reset(wl, reset_tx_queues); - wl1271_power_off(wl); - - memset(wl->bssid, 0, ETH_ALEN); - memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); - wl->ssid_len = 0; - wl->bss_type = MAX_BSS_TYPE; - wl->set_bss_type = MAX_BSS_TYPE; - wl->p2p = 0; - wl->band = IEEE80211_BAND_2GHZ; - - wl->rx_counter = 0; - wl->psm_entry_retry = 0; - wl->power_level = WL1271_DEFAULT_POWER_LEVEL; - wl->tx_blocks_available = 0; - wl->tx_allocated_blocks = 0; - wl->tx_results_count = 0; - wl->tx_packets_count = 0; - wl->time_offset = 0; - wl->session_counter = 0; - wl->rate_set = CONF_TX_RATE_MASK_BASIC; - wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; - wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; - wl->vif = NULL; - wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; - wl1271_free_ap_keys(wl); - memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map)); - wl->ap_fw_ps_map = 0; - wl->ap_ps_map = 0; - wl->sched_scanning = false; - wl->role_id = WL12XX_INVALID_ROLE_ID; - wl->dev_role_id = WL12XX_INVALID_ROLE_ID; - memset(wl->roles_map, 0, sizeof(wl->roles_map)); - memset(wl->links_map, 0, sizeof(wl->links_map)); - memset(wl->roc_map, 0, sizeof(wl->roc_map)); - wl->active_sta_count = 0; - - /* The system link is always allocated */ - __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); - - /* - * this is performed after the cancel_work calls and the associated - * mutex_lock, so that wl1271_op_add_interface does not accidentally - * get executed before all these vars have been reset. 
- */ - wl->flags = 0; - - wl->tx_blocks_freed = 0; - - for (i = 0; i < NUM_TX_QUEUES; i++) { - wl->tx_pkts_freed[i] = 0; - wl->tx_allocated_pkts[i] = 0; - } - - wl1271_debugfs_reset(wl); - - kfree(wl->fw_status); - wl->fw_status = NULL; - kfree(wl->tx_res_if); - wl->tx_res_if = NULL; - kfree(wl->target_mem_map); - wl->target_mem_map = NULL; } static void wl1271_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl12xx_vif *iter; mutex_lock(&wl->mutex); + + if (wl->state == WL1271_STATE_OFF || + !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + goto out; + /* * wl->vif can be null here if someone shuts down the interface * just when hardware recovery has been started. */ - if (wl->vif) { - WARN_ON(wl->vif != vif); - __wl1271_op_remove_interface(wl, true); - } + wl12xx_for_each_wlvif(wl, iter) { + if (iter != wlvif) + continue; + __wl1271_op_remove_interface(wl, vif, true); + break; + } + WARN_ON(iter != wlvif); +out: mutex_unlock(&wl->mutex); cancel_work_sync(&wl->recovery_work); } -static int wl1271_join(struct wl1271 *wl, bool set_assoc) +static int wl12xx_op_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, bool p2p) +{ + wl1271_op_remove_interface(hw, vif); + + vif->type = ieee80211_iftype_p2p(new_type, p2p); + vif->p2p = p2p; + return wl1271_op_add_interface(hw, vif); +} + +static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool set_assoc) { int ret; - bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); + bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); /* * One of the side effects of the JOIN command is that is clears @@ -2167,20 +2314,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc) * Keep the below message for now, unless it starts bothering * users who really like to roam a lot :) */ - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) wl1271_info("JOIN while associated."); if (set_assoc) - set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); + set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); if (is_ibss) - ret = wl12xx_cmd_role_start_ibss(wl); + ret = wl12xx_cmd_role_start_ibss(wl, wlvif); else - ret = wl12xx_cmd_role_start_sta(wl); + ret = wl12xx_cmd_role_start_sta(wl, wlvif); if (ret < 0) goto out; - if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) goto out; /* @@ -2189,19 +2336,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc) * the join. The acx_aid starts the keep-alive process, and the order * of the commands below is relevant. 
*/ - ret = wl1271_acx_keep_alive_mode(wl, true); + ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); if (ret < 0) goto out; - ret = wl1271_acx_aid(wl, wl->aid); + ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); if (ret < 0) goto out; - ret = wl1271_cmd_build_klv_null_data(wl); + ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); if (ret < 0) goto out; - ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA, + ret = wl1271_acx_keep_alive_config(wl, wlvif, + CMD_TEMPL_KLV_IDX_NULL_DATA, ACX_KEEP_ALIVE_TPL_VALID); if (ret < 0) goto out; @@ -2210,72 +2358,63 @@ out: return ret; } -static int wl1271_unjoin(struct wl1271 *wl) +static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; - if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) { + if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + wl12xx_cmd_stop_channel_switch(wl); - ieee80211_chswitch_done(wl->vif, false); + ieee80211_chswitch_done(vif, false); } /* to stop listening to a channel, we disconnect */ - ret = wl12xx_cmd_role_stop_sta(wl); + ret = wl12xx_cmd_role_stop_sta(wl, wlvif); if (ret < 0) goto out; - memset(wl->bssid, 0, ETH_ALEN); - /* reset TX security counters on a clean disconnect */ - wl->tx_security_last_seq_lsb = 0; - wl->tx_security_seq = 0; + wlvif->tx_security_last_seq_lsb = 0; + wlvif->tx_security_seq = 0; out: return ret; } -static void wl1271_set_band_rate(struct wl1271 *wl) +static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) { - wl->basic_rate_set = wl->bitrate_masks[wl->band]; - wl->rate_set = wl->basic_rate_set; + wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band]; + wlvif->rate_set = wlvif->basic_rate_set; } -static bool wl12xx_is_roc(struct wl1271 *wl) -{ - u8 role_id; - - role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES); - if (role_id >= WL12XX_MAX_ROLES) - return false; - - return true; -} - -static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle) +static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool idle) { int ret; + bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + + if (idle == cur_idle) + return 0; if (idle) { /* no need to croc if we weren't busy (e.g. 
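[Editorial sketch] The comment above stresses that the post-join keep-alive setup is order sensitive: mode enable, then AID (which starts the keep-alive process), then the null-data template, then template validation. A trivial sketch of that ordered, fail-fast sequence (the step strings are descriptions, not the driver's APIs):

#include <stdio.h>

static int step(const char *what)
{
    printf("%s\n", what);
    return 0;
}

static int post_join_setup(void)
{
    int ret;

    ret = step("enable keep-alive mode");
    if (ret < 0)
        return ret;
    ret = step("set AID (starts keep-alive)");
    if (ret < 0)
        return ret;
    ret = step("build KLV null-data template");
    if (ret < 0)
        return ret;
    return step("mark keep-alive template valid");
}

int main(void)
{
    return post_join_setup();
}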
during boot) */ - if (wl12xx_is_roc(wl)) { - ret = wl12xx_croc(wl, wl->dev_role_id); - if (ret < 0) - goto out; - - ret = wl12xx_cmd_role_stop_dev(wl); + if (wl12xx_dev_role_started(wlvif)) { + ret = wl12xx_stop_dev(wl, wlvif); if (ret < 0) goto out; } - wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl); + wlvif->rate_set = + wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) goto out; ret = wl1271_acx_keep_alive_config( - wl, CMD_TEMPL_KLV_IDX_NULL_DATA, + wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA, ACX_KEEP_ALIVE_TPL_INVALID); if (ret < 0) goto out; - set_bit(WL1271_FLAG_IDLE, &wl->flags); + clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); } else { /* The current firmware only supports sched_scan in idle */ if (wl->sched_scanning) { @@ -2283,75 +2422,32 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle) ieee80211_sched_scan_stopped(wl->hw); } - ret = wl12xx_cmd_role_start_dev(wl); - if (ret < 0) - goto out; - - ret = wl12xx_roc(wl, wl->dev_role_id); + ret = wl12xx_start_dev(wl, wlvif); if (ret < 0) goto out; - clear_bit(WL1271_FLAG_IDLE, &wl->flags); + set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); } out: return ret; } -static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) +static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_conf *conf, u32 changed) { - struct wl1271 *wl = hw->priv; - struct ieee80211_conf *conf = &hw->conf; - int channel, ret = 0; - bool is_ap; + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + int channel, ret; channel = ieee80211_frequency_to_channel(conf->channel->center_freq); - wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" - " changed 0x%x", - channel, - conf->flags & IEEE80211_CONF_PS ? "on" : "off", - conf->power_level, - conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", - changed); - - /* - * mac80211 will go to idle nearly immediately after transmitting some - * frames, such as the deauth. To make sure those frames reach the air, - * wait here until the TX queue is fully flushed. - */ - if ((changed & IEEE80211_CONF_CHANGE_IDLE) && - (conf->flags & IEEE80211_CONF_IDLE)) - wl1271_tx_flush(wl); - - mutex_lock(&wl->mutex); - - if (unlikely(wl->state == WL1271_STATE_OFF)) { - /* we support configuring the channel and band while off */ - if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) { - wl->band = conf->channel->band; - wl->channel = channel; - } - - if ((changed & IEEE80211_CONF_CHANGE_POWER)) - wl->power_level = conf->power_level; - - goto out; - } - - is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); - - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; - /* if the channel changes while joined, join again */ if (changed & IEEE80211_CONF_CHANGE_CHANNEL && - ((wl->band != conf->channel->band) || - (wl->channel != channel))) { + ((wlvif->band != conf->channel->band) || + (wlvif->channel != channel))) { /* send all pending packets */ wl1271_tx_work_locked(wl); - wl->band = conf->channel->band; - wl->channel = channel; + wlvif->band = conf->channel->band; + wlvif->channel = channel; if (!is_ap) { /* @@ -2360,24 +2456,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) * possible rate for the band as a fixed rate for * association frames and other control messages. 
*/ - if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) - wl1271_set_band_rate(wl); + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + wl1271_set_band_rate(wl, wlvif); - wl->basic_rate = - wl1271_tx_min_rate_get(wl, wl->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) wl1271_warning("rate policy for channel " "failed %d", ret); - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { - if (wl12xx_is_roc(wl)) { + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, + &wlvif->flags)) { + if (wl12xx_dev_role_started(wlvif)) { /* roaming */ - ret = wl12xx_croc(wl, wl->dev_role_id); + ret = wl12xx_croc(wl, + wlvif->dev_role_id); if (ret < 0) - goto out_sleep; + return ret; } - ret = wl1271_join(wl, false); + ret = wl1271_join(wl, wlvif, false); if (ret < 0) wl1271_warning("cmd join on channel " "failed %d", ret); @@ -2387,66 +2486,114 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) * not idle. otherwise, CROC will be called * anyway. */ - if (wl12xx_is_roc(wl) && + if (wl12xx_dev_role_started(wlvif) && !(conf->flags & IEEE80211_CONF_IDLE)) { - ret = wl12xx_croc(wl, wl->dev_role_id); + ret = wl12xx_stop_dev(wl, wlvif); if (ret < 0) - goto out_sleep; + return ret; - ret = wl12xx_roc(wl, wl->dev_role_id); + ret = wl12xx_start_dev(wl, wlvif); if (ret < 0) - wl1271_warning("roc failed %d", - ret); + return ret; } } } } - if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) { - ret = wl1271_sta_handle_idle(wl, - conf->flags & IEEE80211_CONF_IDLE); - if (ret < 0) - wl1271_warning("idle mode change failed %d", ret); - } - /* * if mac80211 changes the PSM mode, make sure the mode is not * incorrectly changed after the pspoll failure active window. */ if (changed & IEEE80211_CONF_CHANGE_PS) - clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); + clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags); if (conf->flags & IEEE80211_CONF_PS && - !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { - set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); + !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) { + set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags); /* * We enter PSM only if we're already associated. * If we're not, we'll enter it when joining an SSID, * through the bss_info_changed() hook. 
*/ - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { wl1271_debug(DEBUG_PSM, "psm enabled"); - ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, - wl->basic_rate, true); + ret = wl1271_ps_set_mode(wl, wlvif, + STATION_POWER_SAVE_MODE, + wlvif->basic_rate, true); } } else if (!(conf->flags & IEEE80211_CONF_PS) && - test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { + test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) { wl1271_debug(DEBUG_PSM, "psm disabled"); - clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); + clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags); - if (test_bit(WL1271_FLAG_PSM, &wl->flags)) - ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, - wl->basic_rate, true); + if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) + ret = wl1271_ps_set_mode(wl, wlvif, + STATION_ACTIVE_MODE, + wlvif->basic_rate, true); } - if (conf->power_level != wl->power_level) { - ret = wl1271_acx_tx_power(wl, conf->power_level); + if (conf->power_level != wlvif->power_level) { + ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); if (ret < 0) - goto out_sleep; + return ret; + + wlvif->power_level = conf->power_level; + } + return 0; +} + +static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + struct ieee80211_conf *conf = &hw->conf; + int channel, ret = 0; + + channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + + wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" + " changed 0x%x", + channel, + conf->flags & IEEE80211_CONF_PS ? "on" : "off", + conf->power_level, + conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", + changed); + + /* + * mac80211 will go to idle nearly immediately after transmitting some + * frames, such as the deauth. To make sure those frames reach the air, + * wait here until the TX queue is fully flushed. 
+ */ + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + (conf->flags & IEEE80211_CONF_IDLE)) + wl1271_tx_flush(wl); + + mutex_lock(&wl->mutex); + + /* we support configuring the channel and band even while off */ + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + wl->band = conf->channel->band; + wl->channel = channel; + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) wl->power_level = conf->power_level; + + if (unlikely(wl->state == WL1271_STATE_OFF)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* configure each interface */ + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl12xx_config_vif(wl, wlvif, conf, changed); + if (ret < 0) + goto out_sleep; } out_sleep: @@ -2509,6 +2656,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, { struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" @@ -2526,15 +2675,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (ret < 0) goto out; - if (wl->bss_type != BSS_TYPE_AP_BSS) { - if (*total & FIF_ALLMULTI) - ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); - else if (fp) - ret = wl1271_acx_group_address_tbl(wl, fp->enabled, - fp->mc_list, - fp->mc_list_length); - if (ret < 0) - goto out_sleep; + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->bss_type != BSS_TYPE_AP_BSS) { + if (*total & FIF_ALLMULTI) + ret = wl1271_acx_group_address_tbl(wl, wlvif, + false, + NULL, 0); + else if (fp) + ret = wl1271_acx_group_address_tbl(wl, wlvif, + fp->enabled, + fp->mc_list, + fp->mc_list_length); + if (ret < 0) + goto out_sleep; + } } /* @@ -2551,9 +2705,10 @@ out: kfree(fp); } -static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, - u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, - u16 tx_seq_16) +static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 id, u8 key_type, u8 key_size, + const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) { struct wl1271_ap_key *ap_key; int i; @@ -2568,10 +2723,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, * an existing key. 
*/ for (i = 0; i < MAX_NUM_KEYS; i++) { - if (wl->recorded_ap_keys[i] == NULL) + if (wlvif->ap.recorded_keys[i] == NULL) break; - if (wl->recorded_ap_keys[i]->id == id) { + if (wlvif->ap.recorded_keys[i]->id == id) { wl1271_warning("trying to record key replacement"); return -EINVAL; } @@ -2592,21 +2747,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, ap_key->tx_seq_32 = tx_seq_32; ap_key->tx_seq_16 = tx_seq_16; - wl->recorded_ap_keys[i] = ap_key; + wlvif->ap.recorded_keys[i] = ap_key; return 0; } -static void wl1271_free_ap_keys(struct wl1271 *wl) +static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int i; for (i = 0; i < MAX_NUM_KEYS; i++) { - kfree(wl->recorded_ap_keys[i]); - wl->recorded_ap_keys[i] = NULL; + kfree(wlvif->ap.recorded_keys[i]); + wlvif->ap.recorded_keys[i] = NULL; } } -static int wl1271_ap_init_hwenc(struct wl1271 *wl) +static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int i, ret = 0; struct wl1271_ap_key *key; @@ -2614,15 +2769,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl) for (i = 0; i < MAX_NUM_KEYS; i++) { u8 hlid; - if (wl->recorded_ap_keys[i] == NULL) + if (wlvif->ap.recorded_keys[i] == NULL) break; - key = wl->recorded_ap_keys[i]; + key = wlvif->ap.recorded_keys[i]; hlid = key->hlid; if (hlid == WL12XX_INVALID_LINK_ID) - hlid = wl->ap_bcast_hlid; + hlid = wlvif->ap.bcast_hlid; - ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, + ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE, key->id, key->key_type, key->key_size, key->key, hlid, key->tx_seq_32, @@ -2635,23 +2790,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl) } if (wep_key_added) { - ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key, - wl->ap_bcast_hlid); + ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, + wlvif->ap.bcast_hlid); if (ret < 0) goto out; } out: - wl1271_free_ap_keys(wl); + wl1271_free_ap_keys(wl, wlvif); return ret; } -static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, +static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, u32 tx_seq_32, u16 tx_seq_16, struct ieee80211_sta *sta) { int ret; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); if (is_ap) { struct wl1271_station *wl_sta; @@ -2661,10 +2817,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, wl_sta = (struct wl1271_station *)sta->drv_priv; hlid = wl_sta->hlid; } else { - hlid = wl->ap_bcast_hlid; + hlid = wlvif->ap.bcast_hlid; } - if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { /* * We do not support removing keys after AP shutdown. * Pretend we do to make mac80211 happy. 
@@ -2672,12 +2828,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, if (action != KEY_ADD_OR_REPLACE) return 0; - ret = wl1271_record_ap_key(wl, id, + ret = wl1271_record_ap_key(wl, wlvif, id, key_type, key_size, key, hlid, tx_seq_32, tx_seq_16); } else { - ret = wl1271_cmd_set_ap_key(wl, action, + ret = wl1271_cmd_set_ap_key(wl, wlvif, action, id, key_type, key_size, key, hlid, tx_seq_32, tx_seq_16); @@ -2718,10 +2874,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, /* don't remove key if hlid was already deleted */ if (action == KEY_REMOVE && - wl->sta_hlid == WL12XX_INVALID_LINK_ID) + wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) return 0; - ret = wl1271_cmd_set_sta_key(wl, action, + ret = wl1271_cmd_set_sta_key(wl, wlvif, action, id, key_type, key_size, key, addr, tx_seq_32, tx_seq_16); @@ -2731,8 +2887,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, /* the default WEP key needs to be configured at least once */ if (key_type == KEY_WEP) { ret = wl12xx_cmd_set_default_wep_key(wl, - wl->default_key, - wl->sta_hlid); + wlvif->default_key, + wlvif->sta.hlid); if (ret < 0) return ret; } @@ -2747,6 +2903,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key_conf) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; u32 tx_seq_32 = 0; u16 tx_seq_16 = 0; @@ -2782,20 +2939,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_type = KEY_TKIP; key_conf->hw_key_idx = key_conf->keyidx; - tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); - tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); + tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); break; case WLAN_CIPHER_SUITE_CCMP: key_type = KEY_AES; - key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); - tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); + key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); break; case WL1271_CIPHER_SUITE_GEM: key_type = KEY_GEM; - tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); - tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); + tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); break; default: wl1271_error("Unknown key algo 0x%x", key_conf->cipher); @@ -2806,7 +2963,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (cmd) { case SET_KEY: - ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE, + ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE, key_conf->keyidx, key_type, key_conf->keylen, key_conf->key, tx_seq_32, tx_seq_16, sta); @@ -2817,7 +2974,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case DISABLE_KEY: - ret = wl1271_set_key(wl, KEY_REMOVE, + ret = wl1271_set_key(wl, wlvif, KEY_REMOVE, key_conf->keyidx, key_type, key_conf->keylen, key_conf->key, 0, 0, sta); @@ -2847,6 +3004,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; u8 *ssid = NULL; size_t len = 0; @@ -2874,18 +3033,18 @@ static int 
wl1271_op_hw_scan(struct ieee80211_hw *hw, if (ret < 0) goto out; - /* cancel ROC before scanning */ - if (wl12xx_is_roc(wl)) { - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { - /* don't allow scanning right now */ - ret = -EBUSY; - goto out_sleep; - } - wl12xx_croc(wl, wl->dev_role_id); - wl12xx_cmd_role_stop_dev(wl); + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && + test_bit(wlvif->role_id, wl->roc_map)) { + /* don't allow scanning right now */ + ret = -EBUSY; + goto out_sleep; } - ret = wl1271_scan(hw->priv, ssid, len, req); + /* cancel ROC before scanning */ + if (wl12xx_dev_role_started(wlvif)) + wl12xx_stop_dev(wl, wlvif); + + ret = wl1271_scan(hw->priv, vif, ssid, len, req); out_sleep: wl1271_ps_elp_sleep(wl); out: @@ -2921,6 +3080,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, } wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + wl->scan_vif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); @@ -2938,6 +3098,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_sched_scan_ies *ies) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); @@ -2948,11 +3109,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, if (ret < 0) goto out; - ret = wl1271_scan_sched_scan_config(wl, req, ies); + ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); if (ret < 0) goto out_sleep; - ret = wl1271_scan_sched_scan_start(wl); + ret = wl1271_scan_sched_scan_start(wl, wlvif); if (ret < 0) goto out_sleep; @@ -3017,6 +3178,7 @@ out: static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; int ret = 0; mutex_lock(&wl->mutex); @@ -3030,10 +3192,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) if (ret < 0) goto out; - ret = wl1271_acx_rts_threshold(wl, value); - if (ret < 0) - wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); - + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl1271_acx_rts_threshold(wl, wlvif, value); + if (ret < 0) + wl1271_warning("set rts threshold failed: %d", ret); + } wl1271_ps_elp_sleep(wl); out: @@ -3042,9 +3205,10 @@ out: return ret; } -static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, +static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb, int offset) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); u8 ssid_len; const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, skb->len - offset); @@ -3060,8 +3224,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, return -EINVAL; } - wl->ssid_len = ssid_len; - memcpy(wl->ssid, ptr+2, ssid_len); + wlvif->ssid_len = ssid_len; + memcpy(wlvif->ssid, ptr+2, ssid_len); return 0; } @@ -3096,18 +3260,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb, skb_trim(skb, skb->len - len); } -static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, - u8 *probe_rsp_data, - size_t probe_rsp_len, - u32 rates) +static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates, + struct ieee80211_vif *vif) { - struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; + struct sk_buff *skb; + int ret; + + skb = ieee80211_proberesp_get(wl->hw, vif); + if (!skb) + return -EOPNOTSUPP; + + ret = wl1271_cmd_template_set(wl, + CMD_TEMPL_AP_PROBE_RESPONSE, + skb->data, + skb->len, 0, + rates); + + 
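[Editorial sketch] wl1271_ssid_set() above extracts the SSID from the management template with cfg80211_find_ie(). A self-contained userspace walk over 802.11 information elements (tag, length, value triplets) showing the same lookup; the sample frame bytes are made up for the demo:

#include <stdio.h>
#include <string.h>

#define WLAN_EID_SSID 0

static const unsigned char *find_ie(unsigned char eid,
                                    const unsigned char *ies, size_t len)
{
    /* each element: tag(1) | length(1) | value(length) */
    while (len >= 2 && len >= (size_t)(2 + ies[1])) {
        if (ies[0] == eid)
            return ies;
        len -= 2 + ies[1];
        ies += 2 + ies[1];
    }
    return NULL;
}

int main(void)
{
    /* a supported-rates IE followed by an SSID IE ("wl12xx") */
    const unsigned char ies[] = {
        1, 2, 0x82, 0x84,
        WLAN_EID_SSID, 6, 'w', 'l', '1', '2', 'x', 'x',
    };
    const unsigned char *ssid = find_ie(WLAN_EID_SSID, ies, sizeof(ies));

    if (ssid)
        printf("ssid_len=%u ssid=%.*s\n", ssid[1], ssid[1], ssid + 2);
    return 0;
}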
dev_kfree_skb(skb); + return ret; +} + +static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, + struct ieee80211_vif *vif, + u8 *probe_rsp_data, + size_t probe_rsp_len, + u32 rates) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; int ssid_ie_offset, ie_offset, templ_len; const u8 *ptr; /* no need to change probe response if the SSID is set correctly */ - if (wl->ssid_len > 0) + if (wlvif->ssid_len > 0) return wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, probe_rsp_data, @@ -3153,16 +3339,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, } static int wl1271_bss_erp_info_changed(struct wl1271 *wl, + struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changed) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; if (changed & BSS_CHANGED_ERP_SLOT) { if (bss_conf->use_short_slot) - ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); + ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT); else - ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); + ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG); if (ret < 0) { wl1271_warning("Set slot time failed %d", ret); goto out; @@ -3171,16 +3359,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl, if (changed & BSS_CHANGED_ERP_PREAMBLE) { if (bss_conf->use_short_preamble) - wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); + wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT); else - wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); + wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG); } if (changed & BSS_CHANGED_ERP_CTS_PROT) { if (bss_conf->use_cts_prot) - ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); + ret = wl1271_acx_cts_protect(wl, wlvif, + CTSPROTECT_ENABLE); else - ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); + ret = wl1271_acx_cts_protect(wl, wlvif, + CTSPROTECT_DISABLE); if (ret < 0) { wl1271_warning("Set ctsprotect failed %d", ret); goto out; @@ -3196,14 +3386,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int ret = 0; if ((changed & BSS_CHANGED_BEACON_INT)) { wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", bss_conf->beacon_int); - wl->beacon_int = bss_conf->beacon_int; + wlvif->beacon_int = bss_conf->beacon_int; + } + + if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { + u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) { + wl1271_debug(DEBUG_AP, "probe response updated"); + set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags); + } } if ((changed & BSS_CHANGED_BEACON)) { @@ -3214,17 +3413,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); u16 tmpl_id; - if (!beacon) + if (!beacon) { + ret = -EINVAL; goto out; + } wl1271_debug(DEBUG_MASTER, "beacon updated"); - ret = wl1271_ssid_set(wl, beacon, ieoffset); + ret = wl1271_ssid_set(vif, beacon, ieoffset); if (ret < 0) { dev_kfree_skb(beacon); goto out; } - min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); tmpl_id = is_ap ? 
CMD_TEMPL_AP_BEACON : CMD_TEMPL_BEACON; ret = wl1271_cmd_template_set(wl, tmpl_id, @@ -3236,6 +3437,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, goto out; } + /* + * In case we already have a probe-resp beacon set explicitly + * by usermode, don't use the beacon data. + */ + if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags)) + goto end_bcn; + /* remove TIM ie from probe response */ wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset); @@ -3254,7 +3462,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); if (is_ap) - ret = wl1271_ap_set_probe_resp_tmpl(wl, + ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif, beacon->data, beacon->len, min_rate); @@ -3264,12 +3472,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, beacon->data, beacon->len, 0, min_rate); +end_bcn: dev_kfree_skb(beacon); if (ret < 0) goto out; } out: + if (ret != 0) + wl1271_error("beacon info change failed: %d", ret); return ret; } @@ -3279,23 +3490,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; if ((changed & BSS_CHANGED_BASIC_RATES)) { u32 rates = bss_conf->basic_rates; - wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, - wl->band); - wl->basic_rate = wl1271_tx_min_rate_get(wl, - wl->basic_rate_set); + wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, + wlvif->band); + wlvif->basic_rate = wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); - ret = wl1271_init_ap_rates(wl); + ret = wl1271_init_ap_rates(wl, wlvif); if (ret < 0) { wl1271_error("AP rate policy change failed %d", ret); goto out; } - ret = wl1271_ap_init_templates(wl); + ret = wl1271_ap_init_templates(wl, vif); if (ret < 0) goto out; } @@ -3306,38 +3518,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, if ((changed & BSS_CHANGED_BEACON_ENABLED)) { if (bss_conf->enable_beacon) { - if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { - ret = wl12xx_cmd_role_start_ap(wl); + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + ret = wl12xx_cmd_role_start_ap(wl, wlvif); if (ret < 0) goto out; - ret = wl1271_ap_init_hwenc(wl); + ret = wl1271_ap_init_hwenc(wl, wlvif); if (ret < 0) goto out; - set_bit(WL1271_FLAG_AP_STARTED, &wl->flags); + set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); wl1271_debug(DEBUG_AP, "started AP"); } } else { - if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { - ret = wl12xx_cmd_role_stop_ap(wl); + if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + ret = wl12xx_cmd_role_stop_ap(wl, wlvif); if (ret < 0) goto out; - clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags); + clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); + clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, + &wlvif->flags); wl1271_debug(DEBUG_AP, "stopped AP"); } } } - ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); + ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); if (ret < 0) goto out; /* Handle HT information change */ if ((changed & BSS_CHANGED_HT) && (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { - ret = wl1271_acx_set_ht_information(wl, + ret = wl1271_acx_set_ht_information(wl, wlvif, bss_conf->ht_operation_mode); if (ret < 0) { wl1271_warning("Set ht information failed %d", ret); @@ -3355,8 +3569,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { + struct wl12xx_vif *wlvif = 
wl12xx_vif_to_data(vif); bool do_join = false, set_assoc = false; - bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); + bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); bool ibss_joined = false; u32 sta_rate_set = 0; int ret; @@ -3373,14 +3588,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, if (changed & BSS_CHANGED_IBSS) { if (bss_conf->ibss_joined) { - set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags); + set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags); ibss_joined = true; } else { - if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED, - &wl->flags)) { - wl1271_unjoin(wl); - wl12xx_cmd_role_start_dev(wl); - wl12xx_roc(wl, wl->dev_role_id); + if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, + &wlvif->flags)) { + wl1271_unjoin(wl, wlvif); + wl12xx_start_dev(wl, wlvif); } } } @@ -3396,46 +3610,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", bss_conf->enable_beacon ? "enabled" : "disabled"); - if (bss_conf->enable_beacon) - wl->set_bss_type = BSS_TYPE_IBSS; - else - wl->set_bss_type = BSS_TYPE_STA_BSS; do_join = true; } + if (changed & BSS_CHANGED_IDLE) { + ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle); + if (ret < 0) + wl1271_warning("idle mode change failed %d", ret); + } + if ((changed & BSS_CHANGED_CQM)) { bool enable = false; if (bss_conf->cqm_rssi_thold) enable = true; - ret = wl1271_acx_rssi_snr_trigger(wl, enable, + ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable, bss_conf->cqm_rssi_thold, bss_conf->cqm_rssi_hyst); if (ret < 0) goto out; - wl->rssi_thold = bss_conf->cqm_rssi_thold; + wlvif->rssi_thold = bss_conf->cqm_rssi_thold; } - if ((changed & BSS_CHANGED_BSSID) && - /* - * Now we know the correct bssid, so we send a new join command - * and enable the BSSID filter - */ - memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { - memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); - - if (!is_zero_ether_addr(wl->bssid)) { - ret = wl1271_cmd_build_null_data(wl); + if (changed & BSS_CHANGED_BSSID) + if (!is_zero_ether_addr(bss_conf->bssid)) { + ret = wl12xx_cmd_build_null_data(wl, wlvif); if (ret < 0) goto out; - ret = wl1271_build_qos_null_data(wl); + ret = wl1271_build_qos_null_data(wl, vif); if (ret < 0) goto out; /* Need to update the BSSID (for filtering etc) */ do_join = true; } - } if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { rcu_read_lock(); @@ -3459,26 +3667,28 @@ sta_not_found: if (bss_conf->assoc) { u32 rates; int ieoffset; - wl->aid = bss_conf->aid; + wlvif->aid = bss_conf->aid; set_assoc = true; - wl->ps_poll_failures = 0; + wlvif->ps_poll_failures = 0; /* * use basic rates from AP, and determine lowest rate * to use with control frames. */ rates = bss_conf->basic_rates; - wl->basic_rate_set = + wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, - wl->band); - wl->basic_rate = - wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + wlvif->band); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); if (sta_rate_set) - wl->rate_set = wl1271_tx_enabled_rates_get(wl, + wlvif->rate_set = + wl1271_tx_enabled_rates_get(wl, sta_rate_set, - wl->band); - ret = wl1271_acx_sta_rate_policies(wl); + wlvif->band); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) goto out; @@ -3488,53 +3698,56 @@ sta_not_found: * updates it by itself when the first beacon is * received after a join. 
*/ - ret = wl1271_cmd_build_ps_poll(wl, wl->aid); + ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); if (ret < 0) goto out; /* * Get a template for hardware connection maintenance */ - dev_kfree_skb(wl->probereq); - wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL); + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, + wlvif, + NULL); ieoffset = offsetof(struct ieee80211_mgmt, u.probe_req.variable); - wl1271_ssid_set(wl, wl->probereq, ieoffset); + wl1271_ssid_set(vif, wlvif->probereq, ieoffset); /* enable the connection monitoring feature */ - ret = wl1271_acx_conn_monit_params(wl, true); + ret = wl1271_acx_conn_monit_params(wl, wlvif, true); if (ret < 0) goto out; } else { /* use defaults when not associated */ bool was_assoc = - !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED, - &wl->flags); + !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, + &wlvif->flags); bool was_ifup = - !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT, - &wl->flags); - wl->aid = 0; + !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT, + &wlvif->flags); + wlvif->aid = 0; /* free probe-request template */ - dev_kfree_skb(wl->probereq); - wl->probereq = NULL; + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = NULL; /* re-enable dynamic ps - just in case */ - ieee80211_enable_dyn_ps(wl->vif); + ieee80211_enable_dyn_ps(vif); /* revert back to minimum rates for the current band */ - wl1271_set_band_rate(wl); - wl->basic_rate = - wl1271_tx_min_rate_get(wl, wl->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl); + wl1271_set_band_rate(wl, wlvif); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) goto out; /* disable connection monitor features */ - ret = wl1271_acx_conn_monit_params(wl, false); + ret = wl1271_acx_conn_monit_params(wl, wlvif, false); /* Disable the keep-alive feature */ - ret = wl1271_acx_keep_alive_mode(wl, false); + ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); if (ret < 0) goto out; @@ -3546,7 +3759,7 @@ sta_not_found: * no IF_OPER_UP notification. */ if (!was_ifup) { - ret = wl12xx_croc(wl, wl->role_id); + ret = wl12xx_croc(wl, wlvif->role_id); if (ret < 0) goto out; } @@ -3555,17 +3768,16 @@ sta_not_found: * roaming on the same channel. until we will * have a better flow...) 
*/ - if (test_bit(wl->dev_role_id, wl->roc_map)) { - ret = wl12xx_croc(wl, wl->dev_role_id); + if (test_bit(wlvif->dev_role_id, wl->roc_map)) { + ret = wl12xx_croc(wl, + wlvif->dev_role_id); if (ret < 0) goto out; } - wl1271_unjoin(wl); - if (!(conf_flags & IEEE80211_CONF_IDLE)) { - wl12xx_cmd_role_start_dev(wl); - wl12xx_roc(wl, wl->dev_role_id); - } + wl1271_unjoin(wl, wlvif); + if (!(conf_flags & IEEE80211_CONF_IDLE)) + wl12xx_start_dev(wl, wlvif); } } } @@ -3576,27 +3788,28 @@ sta_not_found: if (bss_conf->ibss_joined) { u32 rates = bss_conf->basic_rates; - wl->basic_rate_set = + wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, - wl->band); - wl->basic_rate = - wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + wlvif->band); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); /* by default, use 11b + OFDM rates */ - wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES; - ret = wl1271_acx_sta_rate_policies(wl); + wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES; + ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) goto out; } } - ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); + ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); if (ret < 0) goto out; if (changed & BSS_CHANGED_ARP_FILTER) { __be32 addr = bss_conf->arp_addr_list[0]; - WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); + WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS); if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled) { @@ -3606,24 +3819,24 @@ sta_not_found: * isn't being set (when sending), so we have to * reconfigure the template upon every ip change. */ - ret = wl1271_cmd_build_arp_rsp(wl, addr); + ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr); if (ret < 0) { wl1271_warning("build arp rsp failed: %d", ret); goto out; } - ret = wl1271_acx_arp_ip_filter(wl, + ret = wl1271_acx_arp_ip_filter(wl, wlvif, ACX_ARP_FILTER_ARP_FILTERING, addr); } else - ret = wl1271_acx_arp_ip_filter(wl, 0, addr); + ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr); if (ret < 0) goto out; } if (do_join) { - ret = wl1271_join(wl, set_assoc); + ret = wl1271_join(wl, wlvif, set_assoc); if (ret < 0) { wl1271_warning("cmd join failed %d", ret); goto out; @@ -3631,35 +3844,31 @@ sta_not_found: /* ROC until connected (after EAPOL exchange) */ if (!is_ibss) { - ret = wl12xx_roc(wl, wl->role_id); + ret = wl12xx_roc(wl, wlvif, wlvif->role_id); if (ret < 0) goto out; - wl1271_check_operstate(wl, + wl1271_check_operstate(wl, wlvif, ieee80211_get_operstate(vif)); } /* * stop device role if started (we might already be in - * STA role). TODO: make it better. + * STA/IBSS role). 
*/ - if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) { - ret = wl12xx_croc(wl, wl->dev_role_id); - if (ret < 0) - goto out; - - ret = wl12xx_cmd_role_stop_dev(wl); + if (wl12xx_dev_role_started(wlvif)) { + ret = wl12xx_stop_dev(wl, wlvif); if (ret < 0) goto out; } /* If we want to go in PSM but we're not there yet */ - if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && - !test_bit(WL1271_FLAG_PSM, &wl->flags)) { + if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) && + !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { enum wl1271_cmd_ps_mode mode; mode = STATION_POWER_SAVE_MODE; - ret = wl1271_ps_set_mode(wl, mode, - wl->basic_rate, + ret = wl1271_ps_set_mode(wl, wlvif, mode, + wlvif->basic_rate, true); if (ret < 0) goto out; @@ -3673,7 +3882,7 @@ sta_not_found: ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap, true, - wl->sta_hlid); + wlvif->sta.hlid); if (ret < 0) { wl1271_warning("Set ht cap true failed %d", ret); @@ -3685,7 +3894,7 @@ sta_not_found: ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap, false, - wl->sta_hlid); + wlvif->sta.hlid); if (ret < 0) { wl1271_warning("Set ht cap false failed %d", ret); @@ -3697,7 +3906,7 @@ sta_not_found: /* Handle HT information change. Done after join. */ if ((changed & BSS_CHANGED_HT) && (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { - ret = wl1271_acx_set_ht_information(wl, + ret = wl1271_acx_set_ht_information(wl, wlvif, bss_conf->ht_operation_mode); if (ret < 0) { wl1271_warning("Set ht information failed %d", ret); @@ -3715,7 +3924,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, u32 changed) { struct wl1271 *wl = hw->priv; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", @@ -3726,6 +3936,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; + if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) + goto out; + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -3746,6 +3959,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); u8 ps_scheme; int ret = 0; @@ -3758,31 +3972,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, else ps_scheme = CONF_PS_SCHEME_LEGACY; - if (wl->state == WL1271_STATE_OFF) { - /* - * If the state is off, the parameters will be recorded and - * configured on init. This happens in AP-mode. 
- */ - struct conf_tx_ac_category *conf_ac = - &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)]; - struct conf_tx_tid *conf_tid = - &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)]; - - conf_ac->ac = wl1271_tx_get_queue(queue); - conf_ac->cw_min = (u8)params->cw_min; - conf_ac->cw_max = params->cw_max; - conf_ac->aifsn = params->aifs; - conf_ac->tx_op_limit = params->txop << 5; - - conf_tid->queue_id = wl1271_tx_get_queue(queue); - conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF; - conf_tid->tsid = wl1271_tx_get_queue(queue); - conf_tid->ps_scheme = ps_scheme; - conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY; - conf_tid->apsd_conf[0] = 0; - conf_tid->apsd_conf[1] = 0; + if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) goto out; - } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) @@ -3792,13 +3983,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, * the txop is confed in units of 32us by the mac80211, * we need us */ - ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), + ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue), params->cw_min, params->cw_max, params->aifs, params->txop << 5); if (ret < 0) goto out_sleep; - ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), + ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue), CONF_CHANNEL_TYPE_EDCF, wl1271_tx_get_queue(queue), ps_scheme, CONF_ACK_POLICY_LEGACY, @@ -3861,43 +4052,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, } static int wl1271_allocate_sta(struct wl1271 *wl, - struct ieee80211_sta *sta, - u8 *hlid) + struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta) { struct wl1271_station *wl_sta; - int id; + int ret; - id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS); - if (id >= AP_MAX_STATIONS) { + + if (wl->active_sta_count >= AP_MAX_STATIONS) { wl1271_warning("could not allocate HLID - too much stations"); return -EBUSY; } wl_sta = (struct wl1271_station *)sta->drv_priv; - set_bit(id, wl->ap_hlid_map); - wl_sta->hlid = WL1271_AP_STA_HLID_START + id; - *hlid = wl_sta->hlid; + ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid); + if (ret < 0) { + wl1271_warning("could not allocate HLID - too many links"); + return -EBUSY; + } + + set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map); memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); wl->active_sta_count++; return 0; } -void wl1271_free_sta(struct wl1271 *wl, u8 hlid) +void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) { - int id = hlid - WL1271_AP_STA_HLID_START; - - if (hlid < WL1271_AP_STA_HLID_START) + if (!test_bit(hlid, wlvif->ap.sta_hlid_map)) return; - if (!test_bit(id, wl->ap_hlid_map)) - return; - - clear_bit(id, wl->ap_hlid_map); + clear_bit(hlid, wlvif->ap.sta_hlid_map); memset(wl->links[hlid].addr, 0, ETH_ALEN); wl->links[hlid].ba_bitmap = 0; wl1271_tx_reset_link_queues(wl, hlid); __clear_bit(hlid, &wl->ap_ps_map); __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); + wl12xx_free_link(wl, wlvif, &hlid); wl->active_sta_count--; } @@ -3906,6 +4097,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl1271_station *wl_sta; int ret = 0; u8 hlid; @@ -3914,20 +4107,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (wl->bss_type != BSS_TYPE_AP_BSS) + if (wlvif->bss_type != BSS_TYPE_AP_BSS) goto out; wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); - ret = 
wl1271_allocate_sta(wl, sta, &hlid); + ret = wl1271_allocate_sta(wl, wlvif, sta); if (ret < 0) goto out; + wl_sta = (struct wl1271_station *)sta->drv_priv; + hlid = wl_sta->hlid; + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_free_sta; - ret = wl12xx_cmd_add_peer(wl, sta, hlid); + ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid); if (ret < 0) goto out_sleep; @@ -3944,7 +4140,7 @@ out_sleep: out_free_sta: if (ret < 0) - wl1271_free_sta(wl, hlid); + wl1271_free_sta(wl, wlvif, hlid); out: mutex_unlock(&wl->mutex); @@ -3956,6 +4152,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271_station *wl_sta; int ret = 0, id; @@ -3964,14 +4161,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (wl->bss_type != BSS_TYPE_AP_BSS) + if (wlvif->bss_type != BSS_TYPE_AP_BSS) goto out; wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); wl_sta = (struct wl1271_station *)sta->drv_priv; - id = wl_sta->hlid - WL1271_AP_STA_HLID_START; - if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) + id = wl_sta->hlid; + if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map))) goto out; ret = wl1271_ps_elp_wakeup(wl); @@ -3982,7 +4179,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw, if (ret < 0) goto out_sleep; - wl1271_free_sta(wl, wl_sta->hlid); + wl1271_free_sta(wl, wlvif, wl_sta->hlid); out_sleep: wl1271_ps_elp_sleep(wl); @@ -3999,6 +4196,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, u8 buf_size) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; u8 hlid, *ba_bitmap; @@ -4016,10 +4214,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, goto out; } - if (wl->bss_type == BSS_TYPE_STA_BSS) { - hlid = wl->sta_hlid; - ba_bitmap = &wl->ba_rx_bitmap; - } else if (wl->bss_type == BSS_TYPE_AP_BSS) { + if (wlvif->bss_type == BSS_TYPE_STA_BSS) { + hlid = wlvif->sta.hlid; + ba_bitmap = &wlvif->sta.ba_rx_bitmap; + } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) { struct wl1271_station *wl_sta; wl_sta = (struct wl1271_station *)sta->drv_priv; @@ -4039,7 +4237,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (!wl->ba_support || !wl->ba_allowed) { + if (!wlvif->ba_support || !wlvif->ba_allowed) { ret = -ENOTSUPP; break; } @@ -4108,8 +4306,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271 *wl = hw->priv; - int i; + int i, ret = 0; wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x", mask->control[NL80211_BAND_2GHZ].legacy, @@ -4118,19 +4317,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); for (i = 0; i < IEEE80211_NUM_BANDS; i++) - wl->bitrate_masks[i] = + wlvif->bitrate_masks[i] = wl1271_tx_enabled_rates_get(wl, mask->control[i].legacy, i); + + if (unlikely(wl->state == WL1271_STATE_OFF)) + goto out; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS && + !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl1271_set_band_rate(wl, wlvif); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + + wl1271_ps_elp_sleep(wl); + } 
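[The wl1271_allocate_sta()/wl1271_free_sta() hunks above drop the AP-only ap_hlid_map in favour of a generic wl12xx_allocate_link()/wl12xx_free_link() pair shared by all link types. Those helpers are not shown in this diff; the usual scheme, assuming a device-wide HLID bitmap (called links_map in this sketch, which may not match the driver's actual field name), is plain first-free-bit allocation:

	#include <linux/bitops.h>
	#include <linux/errno.h>

	static int example_allocate_link(struct wl1271 *wl, u8 *hlid)
	{
		unsigned long link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);

		if (link >= WL12XX_MAX_LINKS)
			return -EBUSY;	/* all HLIDs in use */

		__set_bit(link, wl->links_map);
		*hlid = link;
		return 0;
	}

	static void example_free_link(struct wl1271 *wl, u8 hlid)
	{
		__clear_bit(hlid, wl->links_map);
	}

The real allocator also takes the wlvif, since wl1271_free_sta() above additionally clears the per-interface ap.sta_hlid_map; the sketch only shows the bitmap bookkeeping.]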
+out: mutex_unlock(&wl->mutex); - return 0; + return ret; } static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, struct ieee80211_channel_switch *ch_switch) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); @@ -4138,19 +4357,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) { - mutex_unlock(&wl->mutex); - ieee80211_chswitch_done(wl->vif, false); - return; + wl12xx_for_each_wlvif_sta(wl, wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_chswitch_done(vif, false); + } + goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - ret = wl12xx_cmd_channel_switch(wl, ch_switch); + /* TODO: change mac80211 to pass vif as param */ + wl12xx_for_each_wlvif_sta(wl, wlvif) { + ret = wl12xx_cmd_channel_switch(wl, ch_switch); - if (!ret) - set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags); + if (!ret) + set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); + } wl1271_ps_elp_sleep(wl); @@ -4170,10 +4394,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) /* packets are considered pending if in the TX queue or the FW */ ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0); - - /* the above is appropriate for STA mode for PS purposes */ - WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); - out: mutex_unlock(&wl->mutex); @@ -4410,6 +4630,7 @@ static const struct ieee80211_ops wl1271_ops = { .stop = wl1271_op_stop, .add_interface = wl1271_op_add_interface, .remove_interface = wl1271_op_remove_interface, + .change_interface = wl12xx_op_change_interface, #ifdef CONFIG_PM .suspend = wl1271_op_suspend, .resume = wl1271_op_resume, @@ -4604,7 +4825,7 @@ static struct bin_attribute fwlog_attr = { .read = wl1271_sysfs_read_fwlog, }; -int wl1271_register_hw(struct wl1271 *wl) +static int wl1271_register_hw(struct wl1271 *wl) { int ret; @@ -4645,9 +4866,8 @@ int wl1271_register_hw(struct wl1271 *wl) return 0; } -EXPORT_SYMBOL_GPL(wl1271_register_hw); -void wl1271_unregister_hw(struct wl1271 *wl) +static void wl1271_unregister_hw(struct wl1271 *wl) { if (wl->state == WL1271_STATE_PLT) __wl1271_plt_stop(wl); @@ -4657,9 +4877,8 @@ void wl1271_unregister_hw(struct wl1271 *wl) wl->mac80211_registered = false; } -EXPORT_SYMBOL_GPL(wl1271_unregister_hw); -int wl1271_init_ieee80211(struct wl1271 *wl) +static int wl1271_init_ieee80211(struct wl1271 *wl) { static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, @@ -4736,27 +4955,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->reg_notifier = wl1271_reg_notify; - SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); + /* the FW answers probe-requests in AP-mode */ + wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + wl->hw->wiphy->probe_resp_offload = + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + + SET_IEEE80211_DEV(wl->hw, wl->dev); wl->hw->sta_data_size = sizeof(struct wl1271_station); + wl->hw->vif_data_size = sizeof(struct wl12xx_vif); wl->hw->max_rx_aggregation_subframes = 8; return 0; } -EXPORT_SYMBOL_GPL(wl1271_init_ieee80211); #define WL1271_DEFAULT_CHANNEL 0 -struct ieee80211_hw *wl1271_alloc_hw(void) +static struct ieee80211_hw *wl1271_alloc_hw(void) { struct ieee80211_hw *hw; - struct platform_device *plat_dev = NULL; struct wl1271 *wl; int i, j, ret; unsigned int order; - BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS); + 
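[Several of the hunks above (wl1271_op_set_rts_threshold(), wl12xx_op_channel_switch(), and the ps.c changes further down) walk all active interfaces with wl12xx_for_each_wlvif() or its _sta variant. The iterator is not defined in this diff; given the wl->wlvif_list initialised in the allocation hunk just below, it is presumably a thin list_for_each_entry() wrapper, roughly:

	#include <linux/list.h>

	/* illustrative sketch: assumes struct wl12xx_vif carries a 'list' node
	 * that add_interface/remove_interface link onto wl->wlvif_list */
	#define example_for_each_wlvif(wl, wlvif) \
		list_for_each_entry(wlvif, &(wl)->wlvif_list, list)

A _sta variant would then filter on wlvif->bss_type, either in the macro or at each call site.]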
BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS); hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); if (!hw) { @@ -4765,41 +4990,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void) goto err_hw_alloc; } - plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL); - if (!plat_dev) { - wl1271_error("could not allocate platform_device"); - ret = -ENOMEM; - goto err_plat_alloc; - } - wl = hw->priv; memset(wl, 0, sizeof(*wl)); INIT_LIST_HEAD(&wl->list); + INIT_LIST_HEAD(&wl->wlvif_list); wl->hw = hw; - wl->plat_dev = plat_dev; - - for (i = 0; i < NUM_TX_QUEUES; i++) - skb_queue_head_init(&wl->tx_queue[i]); for (i = 0; i < NUM_TX_QUEUES; i++) - for (j = 0; j < AP_MAX_LINKS; j++) + for (j = 0; j < WL12XX_MAX_LINKS; j++) skb_queue_head_init(&wl->links[j].tx_queue[i]); skb_queue_head_init(&wl->deferred_rx_queue); skb_queue_head_init(&wl->deferred_tx_queue); INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); - INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); INIT_WORK(&wl->netstack_work, wl1271_netstack_work); INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); - INIT_WORK(&wl->rx_streaming_enable_work, - wl1271_rx_streaming_enable_work); - INIT_WORK(&wl->rx_streaming_disable_work, - wl1271_rx_streaming_disable_work); wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); if (!wl->freezable_wq) { @@ -4808,41 +5018,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void) } wl->channel = WL1271_DEFAULT_CHANNEL; - wl->beacon_int = WL1271_DEFAULT_BEACON_INT; - wl->default_key = 0; wl->rx_counter = 0; - wl->psm_entry_retry = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; - wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; - wl->basic_rate = CONF_TX_RATE_MASK_BASIC; - wl->rate_set = CONF_TX_RATE_MASK_BASIC; wl->band = IEEE80211_BAND_2GHZ; wl->vif = NULL; wl->flags = 0; wl->sg_enabled = true; wl->hw_pg_ver = -1; - wl->bss_type = MAX_BSS_TYPE; - wl->set_bss_type = MAX_BSS_TYPE; - wl->last_tx_hlid = 0; wl->ap_ps_map = 0; wl->ap_fw_ps_map = 0; wl->quirks = 0; wl->platform_quirks = 0; wl->sched_scanning = false; - wl->tx_security_seq = 0; - wl->tx_security_last_seq_lsb = 0; wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; - wl->role_id = WL12XX_INVALID_ROLE_ID; wl->system_hlid = WL12XX_SYSTEM_HLID; - wl->sta_hlid = WL12XX_INVALID_LINK_ID; - wl->dev_role_id = WL12XX_INVALID_ROLE_ID; - wl->dev_hlid = WL12XX_INVALID_LINK_ID; - wl->session_counter = 0; - wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID; - wl->ap_global_hlid = WL12XX_INVALID_LINK_ID; wl->active_sta_count = 0; - setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer, - (unsigned long) wl); wl->fwlog_size = 0; init_waitqueue_head(&wl->fwlog_waitq); @@ -4860,8 +5050,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void) /* Apply default driver configuration. 
*/ wl1271_conf_init(wl); - wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; - wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; order = get_order(WL1271_AGGR_BUFFER_SIZE); wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); @@ -4883,49 +5071,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void) goto err_dummy_packet; } - /* Register platform device */ - ret = platform_device_register(wl->plat_dev); - if (ret) { - wl1271_error("couldn't register platform device"); - goto err_fwlog; - } - dev_set_drvdata(&wl->plat_dev->dev, wl); - - /* Create sysfs file to control bt coex state */ - ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); - if (ret < 0) { - wl1271_error("failed to create sysfs file bt_coex_state"); - goto err_platform; - } - - /* Create sysfs file to get HW PG version */ - ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); - if (ret < 0) { - wl1271_error("failed to create sysfs file hw_pg_ver"); - goto err_bt_coex_state; - } - - /* Create sysfs file for the FW log */ - ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr); - if (ret < 0) { - wl1271_error("failed to create sysfs file fwlog"); - goto err_hw_pg_ver; - } - return hw; -err_hw_pg_ver: - device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); - -err_bt_coex_state: - device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); - -err_platform: - platform_device_unregister(wl->plat_dev); - -err_fwlog: - free_page((unsigned long)wl->fwlog); - err_dummy_packet: dev_kfree_skb(wl->dummy_packet); @@ -4937,18 +5084,14 @@ err_wq: err_hw: wl1271_debugfs_exit(wl); - kfree(plat_dev); - -err_plat_alloc: ieee80211_free_hw(hw); err_hw_alloc: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(wl1271_alloc_hw); -int wl1271_free_hw(struct wl1271 *wl) +static int wl1271_free_hw(struct wl1271 *wl) { /* Unblock any fwlog readers */ mutex_lock(&wl->mutex); @@ -4956,17 +5099,15 @@ int wl1271_free_hw(struct wl1271 *wl) wake_up_interruptible_all(&wl->fwlog_waitq); mutex_unlock(&wl->mutex); - device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr); + device_remove_bin_file(wl->dev, &fwlog_attr); - device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); + device_remove_file(wl->dev, &dev_attr_hw_pg_ver); - device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); - platform_device_unregister(wl->plat_dev); + device_remove_file(wl->dev, &dev_attr_bt_coex_state); free_page((unsigned long)wl->fwlog); dev_kfree_skb(wl->dummy_packet); free_pages((unsigned long)wl->aggr_buf, get_order(WL1271_AGGR_BUFFER_SIZE)); - kfree(wl->plat_dev); wl1271_debugfs_exit(wl); @@ -4983,7 +5124,174 @@ int wl1271_free_hw(struct wl1271 *wl) return 0; } -EXPORT_SYMBOL_GPL(wl1271_free_hw); + +static irqreturn_t wl12xx_hardirq(int irq, void *cookie) +{ + struct wl1271 *wl = cookie; + unsigned long flags; + + wl1271_debug(DEBUG_IRQ, "IRQ"); + + /* complete the ELP completion */ + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); + if (wl->elp_compl) { + complete(wl->elp_compl); + wl->elp_compl = NULL; + } + + if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { + /* don't enqueue a work right now. 
mark it as pending */ + set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); + wl1271_debug(DEBUG_IRQ, "should not enqueue work"); + disable_irq_nosync(wl->irq); + pm_wakeup_event(wl->dev, 0); + spin_unlock_irqrestore(&wl->wl_lock, flags); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + return IRQ_WAKE_THREAD; +} + +static int __devinit wl12xx_probe(struct platform_device *pdev) +{ + struct wl12xx_platform_data *pdata = pdev->dev.platform_data; + struct ieee80211_hw *hw; + struct wl1271 *wl; + unsigned long irqflags; + int ret = -ENODEV; + + hw = wl1271_alloc_hw(); + if (IS_ERR(hw)) { + wl1271_error("can't allocate hw"); + ret = PTR_ERR(hw); + goto out; + } + + wl = hw->priv; + wl->irq = platform_get_irq(pdev, 0); + wl->ref_clock = pdata->board_ref_clock; + wl->tcxo_clock = pdata->board_tcxo_clock; + wl->platform_quirks = pdata->platform_quirks; + wl->set_power = pdata->set_power; + wl->dev = &pdev->dev; + wl->if_ops = pdata->ops; + + platform_set_drvdata(pdev, wl); + + if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) + irqflags = IRQF_TRIGGER_RISING; + else + irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; + + ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq, + irqflags, + pdev->name, wl); + if (ret < 0) { + wl1271_error("request_irq() failed: %d", ret); + goto out_free_hw; + } + + ret = enable_irq_wake(wl->irq); + if (!ret) { + wl->irq_wake_enabled = true; + device_init_wakeup(wl->dev, 1); + if (pdata->pwr_in_suspend) + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; + + } + disable_irq(wl->irq); + + ret = wl1271_init_ieee80211(wl); + if (ret) + goto out_irq; + + ret = wl1271_register_hw(wl); + if (ret) + goto out_irq; + + /* Create sysfs file to control bt coex state */ + ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); + if (ret < 0) { + wl1271_error("failed to create sysfs file bt_coex_state"); + goto out_irq; + } + + /* Create sysfs file to get HW PG version */ + ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); + if (ret < 0) { + wl1271_error("failed to create sysfs file hw_pg_ver"); + goto out_bt_coex_state; + } + + /* Create sysfs file for the FW log */ + ret = device_create_bin_file(wl->dev, &fwlog_attr); + if (ret < 0) { + wl1271_error("failed to create sysfs file fwlog"); + goto out_hw_pg_ver; + } + + return 0; + +out_hw_pg_ver: + device_remove_file(wl->dev, &dev_attr_hw_pg_ver); + +out_bt_coex_state: + device_remove_file(wl->dev, &dev_attr_bt_coex_state); + +out_irq: + free_irq(wl->irq, wl); + +out_free_hw: + wl1271_free_hw(wl); + +out: + return ret; +} + +static int __devexit wl12xx_remove(struct platform_device *pdev) +{ + struct wl1271 *wl = platform_get_drvdata(pdev); + + if (wl->irq_wake_enabled) { + device_init_wakeup(wl->dev, 0); + disable_irq_wake(wl->irq); + } + wl1271_unregister_hw(wl); + free_irq(wl->irq, wl); + wl1271_free_hw(wl); + + return 0; +} + +static const struct platform_device_id wl12xx_id_table[] __devinitconst = { + { "wl12xx", 0 }, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(platform, wl12xx_id_table); + +static struct platform_driver wl12xx_driver = { + .probe = wl12xx_probe, + .remove = __devexit_p(wl12xx_remove), + .id_table = wl12xx_id_table, + .driver = { + .name = "wl12xx_driver", + .owner = THIS_MODULE, + } +}; + +static int __init wl12xx_init(void) +{ + return platform_driver_register(&wl12xx_driver); +} +module_init(wl12xx_init); + +static void __exit wl12xx_exit(void) +{ + platform_driver_unregister(&wl12xx_driver); +} +module_exit(wl12xx_exit); u32 wl12xx_debug_level = 
DEBUG_NONE; EXPORT_SYMBOL_GPL(wl12xx_debug_level); diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c index c15ebf2efd4..a2bdacdd7e1 100644 --- a/drivers/net/wireless/wl12xx/ps.c +++ b/drivers/net/wireless/wl12xx/ps.c @@ -25,6 +25,7 @@ #include "ps.h" #include "io.h" #include "tx.h" +#include "debug.h" #define WL1271_WAKEUP_TIMEOUT 500 @@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work) { struct delayed_work *dwork; struct wl1271 *wl; + struct wl12xx_vif *wlvif; dwork = container_of(work, struct delayed_work, work); wl = container_of(dwork, struct wl1271, elp_work); @@ -47,11 +49,18 @@ void wl1271_elp_work(struct work_struct *work) if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) goto out; - if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || - (!test_bit(WL1271_FLAG_PSM, &wl->flags) && - !test_bit(WL1271_FLAG_IDLE, &wl->flags))) + if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) goto out; + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + goto out; + + if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + goto out; + } + wl1271_debug(DEBUG_PSM, "chip to elp"); wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); set_bit(WL1271_FLAG_IN_ELP, &wl->flags); @@ -65,13 +74,20 @@ out: /* Routines to toggle sleep mode while in ELP */ void wl1271_ps_elp_sleep(struct wl1271 *wl) { + struct wl12xx_vif *wlvif; + /* we shouldn't get consecutive sleep requests */ if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) return; - if (!test_bit(WL1271_FLAG_PSM, &wl->flags) && - !test_bit(WL1271_FLAG_IDLE, &wl->flags)) - return; + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + return; + + if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + return; + } ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, msecs_to_jiffies(ELP_ENTRY_DELAY)); @@ -143,8 +159,8 @@ out: return 0; } -int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, - u32 rates, bool send) +int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum wl1271_cmd_ps_mode mode, u32 rates, bool send) { int ret; @@ -152,39 +168,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, case STATION_POWER_SAVE_MODE: wl1271_debug(DEBUG_PSM, "entering psm"); - ret = wl1271_acx_wake_up_conditions(wl); + ret = wl1271_acx_wake_up_conditions(wl, wlvif); if (ret < 0) { wl1271_error("couldn't set wake up conditions"); return ret; } - ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); + ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE); if (ret < 0) return ret; - set_bit(WL1271_FLAG_PSM, &wl->flags); + set_bit(WLVIF_FLAG_PSM, &wlvif->flags); break; case STATION_ACTIVE_MODE: default: wl1271_debug(DEBUG_PSM, "leaving psm"); /* disable beacon early termination */ - if (wl->band == IEEE80211_BAND_2GHZ) { - ret = wl1271_acx_bet_enable(wl, false); + if (wlvif->band == IEEE80211_BAND_2GHZ) { + ret = wl1271_acx_bet_enable(wl, wlvif, false); if (ret < 0) return ret; } - /* disable beacon filtering */ - ret = wl1271_acx_beacon_filter_opt(wl, false); - if (ret < 0) - return ret; - - ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); + ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE); if (ret < 0) return ret; - clear_bit(WL1271_FLAG_PSM, &wl->flags); + clear_bit(WLVIF_FLAG_PSM, &wlvif->flags); break; } @@ -223,9 +234,11 @@ static void wl1271_ps_filter_frames(struct wl1271 
*wl, u8 hlid) wl1271_handle_tx_low_watermark(wl); } -void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues) +void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid, bool clean_queues) { struct ieee80211_sta *sta; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); if (test_bit(hlid, &wl->ap_ps_map)) return; @@ -235,7 +248,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues) clean_queues); rcu_read_lock(); - sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); + sta = ieee80211_find_sta(vif, wl->links[hlid].addr); if (!sta) { wl1271_error("could not find sta %pM for starting ps", wl->links[hlid].addr); @@ -253,9 +266,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues) __set_bit(hlid, &wl->ap_ps_map); } -void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid) +void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) { struct ieee80211_sta *sta; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); if (!test_bit(hlid, &wl->ap_ps_map)) return; @@ -265,7 +279,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid) __clear_bit(hlid, &wl->ap_ps_map); rcu_read_lock(); - sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); + sta = ieee80211_find_sta(vif, wl->links[hlid].addr); if (!sta) { wl1271_error("could not find sta %pM for ending ps", wl->links[hlid].addr); diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h index 25eb9bc9b62..a12052f0202 100644 --- a/drivers/net/wireless/wl12xx/ps.h +++ b/drivers/net/wireless/wl12xx/ps.h @@ -27,13 +27,14 @@ #include "wl12xx.h" #include "acx.h" -int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, - u32 rates, bool send); +int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum wl1271_cmd_ps_mode mode, u32 rates, bool send); void wl1271_ps_elp_sleep(struct wl1271 *wl); int wl1271_ps_elp_wakeup(struct wl1271 *wl); void wl1271_elp_work(struct work_struct *work); -void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues); -void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid); +void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid, bool clean_queues); +void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); #define WL1271_PS_COMPLETE_TIMEOUT 500 diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h index 3f570f39758..df34d5977b9 100644 --- a/drivers/net/wireless/wl12xx/reg.h +++ b/drivers/net/wireless/wl12xx/reg.h @@ -408,7 +408,7 @@ /* Firmware image load chunk size */ -#define CHUNK_SIZE 512 +#define CHUNK_SIZE 16384 /* Firmware image header size */ #define FW_HDR_SIZE 8 diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c index dee4cfe9ccc..4fbd2a722ff 100644 --- a/drivers/net/wireless/wl12xx/rx.c +++ b/drivers/net/wireless/wl12xx/rx.c @@ -25,9 +25,11 @@ #include <linux/sched.h> #include "wl12xx.h" +#include "debug.h" #include "acx.h" #include "reg.h" #include "rx.h" +#include "tx.h" #include "io.h" static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status, @@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl, } static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, - bool unaligned) + bool unaligned, u8 *hlid) { struct wl1271_rx_descriptor *desc; struct sk_buff *skb; @@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, * payload aligned to 4 bytes. 
*/ memcpy(buf, data + sizeof(*desc), length - sizeof(*desc)); + *hlid = desc->hlid; hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_beacon(hdr->frame_control)) @@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; - wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb, + wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb, skb->len - desc->pad_len, beacon ? "beacon" : "", - seq_num); + seq_num, *hlid); skb_trim(skb, skb->len - desc->pad_len); @@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) { struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; + unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; u32 buf_size; u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; @@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) u32 mem_block; u32 pkt_length; u32 pkt_offset; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); - bool had_data = false; + u8 hlid; bool unaligned = false; while (drv_rx_counter != fw_rx_counter) { @@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) */ if (wl1271_rx_handle_data(wl, wl->aggr_buf + pkt_offset, - pkt_length, unaligned) == 1) - had_data = true; + pkt_length, unaligned, + &hlid) == 1) { + if (hlid < WL12XX_MAX_LINKS) + __set_bit(hlid, active_hlids); + else + WARN(1, + "hlid exceeded WL12XX_MAX_LINKS " + "(%d)\n", hlid); + } wl->rx_counter++; drv_rx_counter++; @@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); - if (!is_ap && wl->conf.rx_streaming.interval && had_data && - (wl->conf.rx_streaming.always || - test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { - u32 timeout = wl->conf.rx_streaming.duration; - - /* restart rx streaming */ - if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) - ieee80211_queue_work(wl->hw, - &wl->rx_streaming_enable_work); - - mod_timer(&wl->rx_streaming_timer, - jiffies + msecs_to_jiffies(timeout)); - } + wl12xx_rearm_rx_streaming(wl, active_hlids); } diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c index fc29c671cf3..e24111ececc 100644 --- a/drivers/net/wireless/wl12xx/scan.c +++ b/drivers/net/wireless/wl12xx/scan.c @@ -24,6 +24,7 @@ #include <linux/ieee80211.h> #include "wl12xx.h" +#include "debug.h" #include "cmd.h" #include "scan.h" #include "acx.h" @@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work) { struct delayed_work *dwork; struct wl1271 *wl; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; int ret; bool is_sta, is_ibss; @@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work) if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; + vif = wl->scan_vif; + wlvif = wl12xx_vif_to_data(vif); + wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); wl->scan.req = NULL; + wl->scan_vif = NULL; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { /* restore 
hardware connection monitoring template */ - wl1271_cmd_build_ap_probe_req(wl, wl->probereq); + wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq); } /* return to ROC if needed */ - is_sta = (wl->bss_type == BSS_TYPE_STA_BSS); - is_ibss = (wl->bss_type == BSS_TYPE_IBSS); - if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) || - (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) && - !test_bit(wl->dev_role_id, wl->roc_map)) { + is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS); + is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); + if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) || + (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) && + !test_bit(wlvif->dev_role_id, wl->roc_map)) { /* restore remain on channel */ - wl12xx_cmd_role_start_dev(wl); - wl12xx_roc(wl, wl->dev_role_id); + wl12xx_start_dev(wl, wlvif); } wl1271_ps_elp_sleep(wl); @@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl, #define WL1271_NOTHING_TO_SCAN 1 -static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, - bool passive, u32 basic_rate) +static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, + enum ieee80211_band band, + bool passive, u32 basic_rate) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271_cmd_scan *cmd; struct wl1271_cmd_trigger_scan_to *trigger; int ret; @@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, if (passive) scan_options |= WL1271_SCAN_OPT_PASSIVE; - if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) { + if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; goto out; } - cmd->params.role_id = wl->role_id; + cmd->params.role_id = wlvif->role_id; cmd->params.scan_options = cpu_to_le16(scan_options); cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, @@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, cmd->params.tx_rate = cpu_to_le32(basic_rate); cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; - cmd->params.tx_rate = cpu_to_le32(basic_rate); cmd->params.tid_trigger = 0; cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; @@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); } - memcpy(cmd->addr, wl->mac_addr, ETH_ALEN); + memcpy(cmd->addr, vif->addr, ETH_ALEN); - ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len, - wl->scan.req->ie, wl->scan.req->ie_len, - band); + ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid, + wl->scan.ssid_len, wl->scan.req->ie, + wl->scan.req->ie_len, band); if (ret < 0) { wl1271_error("PROBE request template failed"); goto out; @@ -241,11 +248,12 @@ out: return ret; } -void wl1271_scan_stm(struct wl1271 *wl) +void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; enum ieee80211_band band; - u32 rate; + u32 rate, mask; switch (wl->scan.state) { case WL1271_SCAN_STATE_IDLE: @@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl) case WL1271_SCAN_STATE_2GHZ_ACTIVE: band = IEEE80211_BAND_2GHZ; - rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); - ret = wl1271_scan_send(wl, band, false, rate); + mask = wlvif->bitrate_masks[band]; + if (wl->scan.req->no_cck) { + mask &= ~CONF_TX_CCK_RATES; + if (!mask) + mask = CONF_TX_RATE_MASK_BASIC_P2P; + } + rate = wl1271_tx_min_rate_get(wl, mask); + ret = 
wl1271_scan_send(wl, vif, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; - wl1271_scan_stm(wl); + wl1271_scan_stm(wl, vif); } break; case WL1271_SCAN_STATE_2GHZ_PASSIVE: band = IEEE80211_BAND_2GHZ; - rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); - ret = wl1271_scan_send(wl, band, true, rate); + mask = wlvif->bitrate_masks[band]; + if (wl->scan.req->no_cck) { + mask &= ~CONF_TX_CCK_RATES; + if (!mask) + mask = CONF_TX_RATE_MASK_BASIC_P2P; + } + rate = wl1271_tx_min_rate_get(wl, mask); + ret = wl1271_scan_send(wl, vif, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { if (wl->enable_11a) wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; else wl->scan.state = WL1271_SCAN_STATE_DONE; - wl1271_scan_stm(wl); + wl1271_scan_stm(wl, vif); } break; case WL1271_SCAN_STATE_5GHZ_ACTIVE: band = IEEE80211_BAND_5GHZ; - rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); - ret = wl1271_scan_send(wl, band, false, rate); + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); + ret = wl1271_scan_send(wl, vif, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; - wl1271_scan_stm(wl); + wl1271_scan_stm(wl, vif); } break; case WL1271_SCAN_STATE_5GHZ_PASSIVE: band = IEEE80211_BAND_5GHZ; - rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); - ret = wl1271_scan_send(wl, band, true, rate); + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); + ret = wl1271_scan_send(wl, vif, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_DONE; - wl1271_scan_stm(wl); + wl1271_scan_stm(wl, vif); } break; @@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl) } } -int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, +int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, struct cfg80211_scan_request *req) { /* @@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, wl->scan.ssid_len = 0; } + wl->scan_vif = vif; wl->scan.req = req; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); @@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, msecs_to_jiffies(WL1271_SCAN_TIMEOUT)); - wl1271_scan_stm(wl); + wl1271_scan_stm(wl, vif); return 0; } @@ -415,18 +437,19 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, if (flags & IEEE80211_CHAN_RADAR) { channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; + channels[j].passive_duration = cpu_to_le16(c->dwell_time_dfs); - } - else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) { + } else { channels[j].passive_duration = cpu_to_le16(c->dwell_time_passive); - } else { - channels[j].min_duration = - cpu_to_le16(c->min_dwell_time_active); - channels[j].max_duration = - cpu_to_le16(c->max_dwell_time_active); } + + channels[j].min_duration = + cpu_to_le16(c->min_dwell_time_active); + channels[j].max_duration = + cpu_to_le16(c->max_dwell_time_active); + channels[j].tx_power_att = req->channels[i]->max_power; channels[j].channel = req->channels[i]->hw_value; @@ -550,6 +573,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, * so they're used in probe requests. 
*/ for (i = 0; i < req->n_ssids; i++) { + if (!req->ssids[i].ssid_len) + continue; + for (j = 0; j < cmd->n_ssids; j++) if (!memcmp(req->ssids[i].ssid, cmd->ssids[j].ssid, @@ -585,6 +611,7 @@ out: } int wl1271_scan_sched_scan_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_sched_scan_ies *ies) { @@ -631,7 +658,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, } if (!force_passive && cfg->active[0]) { - ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, + ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ie[IEEE80211_BAND_2GHZ], ies->len[IEEE80211_BAND_2GHZ], @@ -643,7 +670,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, } if (!force_passive && cfg->active[1]) { - ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, + ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ie[IEEE80211_BAND_5GHZ], ies->len[IEEE80211_BAND_5GHZ], @@ -667,17 +694,17 @@ out: return ret; } -int wl1271_scan_sched_scan_start(struct wl1271 *wl) +int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_cmd_sched_scan_start *start; int ret = 0; wl1271_debug(DEBUG_CMD, "cmd periodic scan start"); - if (wl->bss_type != BSS_TYPE_STA_BSS) + if (wlvif->bss_type != BSS_TYPE_STA_BSS) return -EOPNOTSUPP; - if (!test_bit(WL1271_FLAG_IDLE, &wl->flags)) + if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) return -EBUSY; start = kzalloc(sizeof(*start), GFP_KERNEL); @@ -727,7 +754,6 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl) wl1271_error("failed to send sched scan stop command"); goto out_free; } - wl->sched_scanning = false; out_free: kfree(stop); diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h index 92115156522..a7ed43dc08c 100644 --- a/drivers/net/wireless/wl12xx/scan.h +++ b/drivers/net/wireless/wl12xx/scan.h @@ -26,18 +26,20 @@ #include "wl12xx.h" -int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, +int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, struct cfg80211_scan_request *req); int wl1271_scan_stop(struct wl1271 *wl); int wl1271_scan_build_probe_req(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band); -void wl1271_scan_stm(struct wl1271 *wl); +void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif); void wl1271_scan_complete_work(struct work_struct *work); int wl1271_scan_sched_scan_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_sched_scan_ies *ies); -int wl1271_scan_sched_scan_start(struct wl1271 *wl); +int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif); void wl1271_scan_sched_scan_stop(struct wl1271 *wl); void wl1271_scan_sched_scan_results(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c index 516a8980723..468a50553fa 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/wl12xx/sdio.c @@ -24,6 +24,7 @@ #include <linux/irq.h> #include <linux/module.h> #include <linux/vmalloc.h> +#include <linux/platform_device.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/card.h> @@ -44,107 +45,67 @@ #define SDIO_DEVICE_ID_TI_WL1271 0x4076 #endif +struct wl12xx_sdio_glue { + struct device *dev; + struct platform_device *core; +}; + static const struct sdio_device_id 
wl1271_devices[] __devinitconst = { { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, {} }; MODULE_DEVICE_TABLE(sdio, wl1271_devices); -static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz) -{ - sdio_claim_host(wl->if_priv); - sdio_set_block_size(wl->if_priv, blksz); - sdio_release_host(wl->if_priv); -} - -static inline struct sdio_func *wl_to_func(struct wl1271 *wl) -{ - return wl->if_priv; -} - -static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl) -{ - return &(wl_to_func(wl)->dev); -} - -static irqreturn_t wl1271_hardirq(int irq, void *cookie) +static void wl1271_sdio_set_block_size(struct device *child, + unsigned int blksz) { - struct wl1271 *wl = cookie; - unsigned long flags; + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + struct sdio_func *func = dev_to_sdio_func(glue->dev); - wl1271_debug(DEBUG_IRQ, "IRQ"); - - /* complete the ELP completion */ - spin_lock_irqsave(&wl->wl_lock, flags); - set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - if (wl->elp_compl) { - complete(wl->elp_compl); - wl->elp_compl = NULL; - } - - if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { - /* don't enqueue a work right now. mark it as pending */ - set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); - wl1271_debug(DEBUG_IRQ, "should not enqueue work"); - disable_irq_nosync(wl->irq); - pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0); - spin_unlock_irqrestore(&wl->wl_lock, flags); - return IRQ_HANDLED; - } - spin_unlock_irqrestore(&wl->wl_lock, flags); - - return IRQ_WAKE_THREAD; -} - -static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) -{ - disable_irq(wl->irq); -} - -static void wl1271_sdio_enable_interrupts(struct wl1271 *wl) -{ - enable_irq(wl->irq); + sdio_claim_host(func); + sdio_set_block_size(func, blksz); + sdio_release_host(func); } -static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, +static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf, size_t len, bool fixed) { int ret; - struct sdio_func *func = wl_to_func(wl); + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + struct sdio_func *func = dev_to_sdio_func(glue->dev); if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); - wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", - addr, ((u8 *)buf)[0]); + dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", + addr, ((u8 *)buf)[0]); } else { if (fixed) ret = sdio_readsb(func, buf, addr, len); else ret = sdio_memcpy_fromio(func, buf, addr, len); - wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes", - addr, len); - wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); + dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n", + addr, len); } if (ret) - wl1271_error("sdio read failed (%d)", ret); + dev_err(child->parent, "sdio read failed (%d)\n", ret); } -static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, +static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, size_t len, bool fixed) { int ret; - struct sdio_func *func = wl_to_func(wl); + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + struct sdio_func *func = dev_to_sdio_func(glue->dev); if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); - wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", - addr, ((u8 *)buf)[0]); + dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", + addr, ((u8 *)buf)[0]); } else { - 
wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes", - addr, len); - wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); + dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n", + addr, len); if (fixed) ret = sdio_writesb(func, addr, buf, len); @@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, } if (ret) - wl1271_error("sdio write failed (%d)", ret); + dev_err(child->parent, "sdio write failed (%d)\n", ret); } -static int wl1271_sdio_power_on(struct wl1271 *wl) +static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) { - struct sdio_func *func = wl_to_func(wl); int ret; + struct sdio_func *func = dev_to_sdio_func(glue->dev); /* If enabled, tell runtime PM not to power off the card */ if (pm_runtime_enabled(&func->dev)) { @@ -180,10 +141,10 @@ out: return ret; } -static int wl1271_sdio_power_off(struct wl1271 *wl) +static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { - struct sdio_func *func = wl_to_func(wl); int ret; + struct sdio_func *func = dev_to_sdio_func(glue->dev); sdio_disable_func(func); sdio_release_host(func); @@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl) return ret; } -static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable) +static int wl12xx_sdio_set_power(struct device *child, bool enable) { + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + if (enable) - return wl1271_sdio_power_on(wl); + return wl12xx_sdio_power_on(glue); else - return wl1271_sdio_power_off(wl); + return wl12xx_sdio_power_off(glue); } static struct wl1271_if_operations sdio_ops = { - .read = wl1271_sdio_raw_read, - .write = wl1271_sdio_raw_write, - .power = wl1271_sdio_set_power, - .dev = wl1271_sdio_wl_to_dev, - .enable_irq = wl1271_sdio_enable_interrupts, - .disable_irq = wl1271_sdio_disable_interrupts, + .read = wl12xx_sdio_raw_read, + .write = wl12xx_sdio_raw_write, + .power = wl12xx_sdio_set_power, .set_block_size = wl1271_sdio_set_block_size, }; static int __devinit wl1271_probe(struct sdio_func *func, const struct sdio_device_id *id) { - struct ieee80211_hw *hw; - const struct wl12xx_platform_data *wlan_data; - struct wl1271 *wl; - unsigned long irqflags; + struct wl12xx_platform_data *wlan_data; + struct wl12xx_sdio_glue *glue; + struct resource res[1]; mmc_pm_flag_t mmcflags; - int ret; + int ret = -ENOMEM; /* We are only able to handle the wlan function */ if (func->num != 0x02) return -ENODEV; - hw = wl1271_alloc_hw(); - if (IS_ERR(hw)) - return PTR_ERR(hw); - - wl = hw->priv; + glue = kzalloc(sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&func->dev, "can't allocate glue\n"); + goto out; + } - wl->if_priv = func; - wl->if_ops = &sdio_ops; + glue->dev = &func->dev; /* Grab access to FN0 for ELP reg. 
*/ func->card->quirks |= MMC_QUIRK_LENIENT_FN0; @@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func, wlan_data = wl12xx_get_platform_data(); if (IS_ERR(wlan_data)) { ret = PTR_ERR(wlan_data); - wl1271_error("missing wlan platform data: %d", ret); - goto out_free; + dev_err(glue->dev, "missing wlan platform data: %d\n", ret); + goto out_free_glue; } - wl->irq = wlan_data->irq; - wl->ref_clock = wlan_data->board_ref_clock; - wl->tcxo_clock = wlan_data->board_tcxo_clock; - wl->platform_quirks = wlan_data->platform_quirks; + /* if sdio can keep power while host is suspended, enable wow */ + mmcflags = sdio_get_host_pm_caps(func); + dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); - if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) - irqflags = IRQF_TRIGGER_RISING; - else - irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; - - ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, - irqflags, - DRIVER_NAME, wl); - if (ret < 0) { - wl1271_error("request_irq() failed: %d", ret); - goto out_free; - } + if (mmcflags & MMC_PM_KEEP_POWER) + wlan_data->pwr_in_suspend = true; + + wlan_data->ops = &sdio_ops; - ret = enable_irq_wake(wl->irq); - if (!ret) { - wl->irq_wake_enabled = true; - device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1); + sdio_set_drvdata(func, glue); - /* if sdio can keep power while host is suspended, enable wow */ - mmcflags = sdio_get_host_pm_caps(func); - wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags); + /* Tell PM core that we don't need the card to be powered now */ + pm_runtime_put_noidle(&func->dev); - if (mmcflags & MMC_PM_KEEP_POWER) - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; + glue->core = platform_device_alloc("wl12xx", -1); + if (!glue->core) { + dev_err(glue->dev, "can't allocate platform_device"); + ret = -ENOMEM; + goto out_free_glue; } - disable_irq(wl->irq); - ret = wl1271_init_ieee80211(wl); - if (ret) - goto out_irq; + glue->core->dev.parent = &func->dev; - ret = wl1271_register_hw(wl); - if (ret) - goto out_irq; + memset(res, 0x00, sizeof(res)); - sdio_set_drvdata(func, wl); + res[0].start = wlan_data->irq; + res[0].flags = IORESOURCE_IRQ; + res[0].name = "irq"; - /* Tell PM core that we don't need the card to be powered now */ - pm_runtime_put_noidle(&func->dev); + ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); + if (ret) { + dev_err(glue->dev, "can't add resources\n"); + goto out_dev_put; + } + ret = platform_device_add_data(glue->core, wlan_data, + sizeof(*wlan_data)); + if (ret) { + dev_err(glue->dev, "can't add platform data\n"); + goto out_dev_put; + } + + ret = platform_device_add(glue->core); + if (ret) { + dev_err(glue->dev, "can't add platform device\n"); + goto out_dev_put; + } return 0; - out_irq: - free_irq(wl->irq, wl); +out_dev_put: + platform_device_put(glue->core); - out_free: - wl1271_free_hw(wl); +out_free_glue: + kfree(glue); +out: return ret; } static void __devexit wl1271_remove(struct sdio_func *func) { - struct wl1271 *wl = sdio_get_drvdata(func); + struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); /* Undo decrement done above in wl1271_probe */ pm_runtime_get_noresume(&func->dev); - wl1271_unregister_hw(wl); - if (wl->irq_wake_enabled) { - device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0); - disable_irq_wake(wl->irq); - } - free_irq(wl->irq, wl); - wl1271_free_hw(wl); + platform_device_del(glue->core); + platform_device_put(glue->core); + kfree(glue); } #ifdef CONFIG_PM @@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev) /* Tell MMC/SDIO 
core it's OK to power down the card * (if it isn't already), but not to remove it completely */ struct sdio_func *func = dev_to_sdio_func(dev); - struct wl1271 *wl = sdio_get_drvdata(func); + struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); + struct wl1271 *wl = platform_get_drvdata(glue->core); mmc_pm_flag_t sdio_flags; int ret = 0; - wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d", - wl->wow_enabled); + dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", + wl->wow_enabled); /* check whether sdio should keep power */ if (wl->wow_enabled) { sdio_flags = sdio_get_host_pm_caps(func); if (!(sdio_flags & MMC_PM_KEEP_POWER)) { - wl1271_error("can't keep power while host " - "is suspended"); + dev_err(dev, "can't keep power while host " + "is suspended\n"); ret = -EINVAL; goto out; } @@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev) /* keep power while host suspended */ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) { - wl1271_error("error while trying to keep power"); + dev_err(dev, "error while trying to keep power\n"); goto out; } @@ -367,9 +325,10 @@ out: static int wl1271_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - struct wl1271 *wl = sdio_get_drvdata(func); + struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); + struct wl1271 *wl = platform_get_drvdata(glue->core); - wl1271_debug(DEBUG_MAC80211, "wl1271 resume"); + dev_dbg(dev, "wl1271 resume\n"); if (wl->wow_enabled) { /* claim back host */ sdio_claim_host(func); diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c deleted file mode 100644 index f25d5d9212e..00000000000 --- a/drivers/net/wireless/wl12xx/sdio_test.c +++ /dev/null @@ -1,543 +0,0 @@ -/* - * SDIO testing driver for wl12xx - * - * Copyright (C) 2010 Nokia Corporation - * - * Contact: Roger Quadros <roger.quadros@nokia.com> - * - * wl12xx read/write routines taken from the main module - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#include <linux/irq.h> -#include <linux/module.h> -#include <linux/crc7.h> -#include <linux/vmalloc.h> -#include <linux/mmc/sdio_func.h> -#include <linux/mmc/sdio_ids.h> -#include <linux/mmc/card.h> -#include <linux/mmc/host.h> -#include <linux/gpio.h> -#include <linux/wl12xx.h> -#include <linux/kthread.h> -#include <linux/firmware.h> -#include <linux/pm_runtime.h> - -#include "wl12xx.h" -#include "io.h" -#include "boot.h" - -#ifndef SDIO_VENDOR_ID_TI -#define SDIO_VENDOR_ID_TI 0x0097 -#endif - -#ifndef SDIO_DEVICE_ID_TI_WL1271 -#define SDIO_DEVICE_ID_TI_WL1271 0x4076 -#endif - -static bool rx, tx; - -module_param(rx, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(rx, "Perform rx test. Default (0). " - "This test continuously reads data from the SDIO device.\n"); - -module_param(tx, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(tx, "Perform tx test. Default (0). 
" - "This test continuously writes data to the SDIO device.\n"); - -struct wl1271_test { - struct wl1271 wl; - struct task_struct *test_task; -}; - -static const struct sdio_device_id wl1271_devices[] = { - { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, - {} -}; - -static inline struct sdio_func *wl_to_func(struct wl1271 *wl) -{ - return wl->if_priv; -} - -static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl) -{ - return &(wl_to_func(wl)->dev); -} - -static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, - size_t len, bool fixed) -{ - int ret = 0; - struct sdio_func *func = wl_to_func(wl); - - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { - ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); - wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", - addr, ((u8 *)buf)[0]); - } else { - if (fixed) - ret = sdio_readsb(func, buf, addr, len); - else - ret = sdio_memcpy_fromio(func, buf, addr, len); - - wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes", - addr, len); - wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); - } - - if (ret) - wl1271_error("sdio read failed (%d)", ret); -} - -static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, - size_t len, bool fixed) -{ - int ret = 0; - struct sdio_func *func = wl_to_func(wl); - - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { - sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); - wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", - addr, ((u8 *)buf)[0]); - } else { - wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes", - addr, len); - wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); - - if (fixed) - ret = sdio_writesb(func, addr, buf, len); - else - ret = sdio_memcpy_toio(func, addr, buf, len); - } - if (ret) - wl1271_error("sdio write failed (%d)", ret); - -} - -static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable) -{ - struct sdio_func *func = wl_to_func(wl); - int ret; - - /* Let the SDIO stack handle wlan_enable control, so we - * keep host claimed while wlan is in use to keep wl1271 - * alive. 
- */ - if (enable) { - /* Power up the card */ - ret = pm_runtime_get_sync(&func->dev); - if (ret < 0) - goto out; - - /* Runtime PM might be disabled, power up the card manually */ - ret = mmc_power_restore_host(func->card->host); - if (ret < 0) - goto out; - - sdio_claim_host(func); - sdio_enable_func(func); - } else { - sdio_disable_func(func); - sdio_release_host(func); - - /* Runtime PM might be disabled, power off the card manually */ - ret = mmc_power_save_host(func->card->host); - if (ret < 0) - goto out; - - /* Power down the card */ - ret = pm_runtime_put_sync(&func->dev); - } - -out: - return ret; -} - -static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) -{ -} - -static void wl1271_sdio_enable_interrupts(struct wl1271 *wl) -{ -} - - -static struct wl1271_if_operations sdio_ops = { - .read = wl1271_sdio_raw_read, - .write = wl1271_sdio_raw_write, - .power = wl1271_sdio_set_power, - .dev = wl1271_sdio_wl_to_dev, - .enable_irq = wl1271_sdio_enable_interrupts, - .disable_irq = wl1271_sdio_disable_interrupts, -}; - -static void wl1271_fw_wakeup(struct wl1271 *wl) -{ - u32 elp_reg; - - elp_reg = ELPCTRL_WAKE_UP; - wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); -} - -static int wl1271_fetch_firmware(struct wl1271 *wl) -{ - const struct firmware *fw; - int ret; - - if (wl->chip.id == CHIP_ID_1283_PG20) - ret = request_firmware(&fw, WL128X_FW_NAME, - wl1271_wl_to_dev(wl)); - else - ret = request_firmware(&fw, WL127X_FW_NAME, - wl1271_wl_to_dev(wl)); - - if (ret < 0) { - wl1271_error("could not get firmware: %d", ret); - return ret; - } - - if (fw->size % 4) { - wl1271_error("firmware size is not multiple of 32 bits: %zu", - fw->size); - ret = -EILSEQ; - goto out; - } - - wl->fw_len = fw->size; - wl->fw = vmalloc(wl->fw_len); - - if (!wl->fw) { - wl1271_error("could not allocate memory for the firmware"); - ret = -ENOMEM; - goto out; - } - - memcpy(wl->fw, fw->data, wl->fw_len); - - ret = 0; - -out: - release_firmware(fw); - - return ret; -} - -static int wl1271_fetch_nvs(struct wl1271 *wl) -{ - const struct firmware *fw; - int ret; - - ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl)); - - if (ret < 0) { - wl1271_error("could not get nvs file: %d", ret); - return ret; - } - - wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); - - if (!wl->nvs) { - wl1271_error("could not allocate memory for the nvs file"); - ret = -ENOMEM; - goto out; - } - - wl->nvs_len = fw->size; - -out: - release_firmware(fw); - - return ret; -} - -static int wl1271_chip_wakeup(struct wl1271 *wl) -{ - struct wl1271_partition_set partition; - int ret; - - msleep(WL1271_PRE_POWER_ON_SLEEP); - ret = wl1271_power_on(wl); - if (ret) - return ret; - - msleep(WL1271_POWER_ON_SLEEP); - - /* We don't need a real memory partition here, because we only want - * to use the registers at this point. */ - memset(&partition, 0, sizeof(partition)); - partition.reg.start = REGISTERS_BASE; - partition.reg.size = REGISTERS_DOWN_SIZE; - wl1271_set_partition(wl, &partition); - - /* ELP module wake up */ - wl1271_fw_wakeup(wl); - - /* whal_FwCtrl_BootSm() */ - - /* 0. read chip id from CHIP_ID */ - wl->chip.id = wl1271_read32(wl, CHIP_ID_B); - - /* 1. 
check if chip id is valid */ - - switch (wl->chip.id) { - case CHIP_ID_1271_PG10: - wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", - wl->chip.id); - break; - case CHIP_ID_1271_PG20: - wl1271_notice("chip id 0x%x (1271 PG20)", - wl->chip.id); - break; - case CHIP_ID_1283_PG20: - wl1271_notice("chip id 0x%x (1283 PG20)", - wl->chip.id); - break; - case CHIP_ID_1283_PG10: - default: - wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); - return -ENODEV; - } - - return ret; -} - -static struct wl1271_partition_set part_down = { - .mem = { - .start = 0x00000000, - .size = 0x000177c0 - }, - .reg = { - .start = REGISTERS_BASE, - .size = 0x00008800 - }, - .mem2 = { - .start = 0x00000000, - .size = 0x00000000 - }, - .mem3 = { - .start = 0x00000000, - .size = 0x00000000 - }, -}; - -static int tester(void *data) -{ - struct wl1271 *wl = data; - struct sdio_func *func = wl_to_func(wl); - struct device *pdev = &func->dev; - int ret = 0; - bool rx_started = 0; - bool tx_started = 0; - uint8_t *tx_buf, *rx_buf; - int test_size = PAGE_SIZE; - u32 addr = 0; - struct wl1271_partition_set partition; - - /* We assume chip is powered up and firmware fetched */ - - memcpy(&partition, &part_down, sizeof(partition)); - partition.mem.start = addr; - wl1271_set_partition(wl, &partition); - - tx_buf = kmalloc(test_size, GFP_KERNEL); - rx_buf = kmalloc(test_size, GFP_KERNEL); - if (!tx_buf || !rx_buf) { - dev_err(pdev, - "Could not allocate memory. Test will not run.\n"); - ret = -ENOMEM; - goto free; - } - - memset(tx_buf, 0x5a, test_size); - - /* write something in data area so we can read it back */ - wl1271_write(wl, addr, tx_buf, test_size, false); - - while (!kthread_should_stop()) { - if (rx && !rx_started) { - dev_info(pdev, "starting rx test\n"); - rx_started = 1; - } else if (!rx && rx_started) { - dev_info(pdev, "stopping rx test\n"); - rx_started = 0; - } - - if (tx && !tx_started) { - dev_info(pdev, "starting tx test\n"); - tx_started = 1; - } else if (!tx && tx_started) { - dev_info(pdev, "stopping tx test\n"); - tx_started = 0; - } - - if (rx_started) - wl1271_read(wl, addr, rx_buf, test_size, false); - - if (tx_started) - wl1271_write(wl, addr, tx_buf, test_size, false); - - if (!rx_started && !tx_started) - msleep(100); - } - -free: - kfree(tx_buf); - kfree(rx_buf); - return ret; -} - -static int __devinit wl1271_probe(struct sdio_func *func, - const struct sdio_device_id *id) -{ - const struct wl12xx_platform_data *wlan_data; - struct wl1271 *wl; - struct wl1271_test *wl_test; - int ret = 0; - - /* wl1271 has 2 sdio functions we handle just the wlan part */ - if (func->num != 0x02) - return -ENODEV; - - wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL); - if (!wl_test) { - dev_err(&func->dev, "Could not allocate memory\n"); - return -ENOMEM; - } - - wl = &wl_test->wl; - - wl->if_priv = func; - wl->if_ops = &sdio_ops; - - /* Grab access to FN0 for ELP reg. 
*/ - func->card->quirks |= MMC_QUIRK_LENIENT_FN0; - - /* Use block mode for transferring over one block size of data */ - func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; - - wlan_data = wl12xx_get_platform_data(); - if (IS_ERR(wlan_data)) { - ret = PTR_ERR(wlan_data); - dev_err(&func->dev, "missing wlan platform data: %d\n", ret); - goto out_free; - } - - wl->irq = wlan_data->irq; - wl->ref_clock = wlan_data->board_ref_clock; - wl->tcxo_clock = wlan_data->board_tcxo_clock; - - sdio_set_drvdata(func, wl_test); - - /* power up the device */ - ret = wl1271_chip_wakeup(wl); - if (ret) { - dev_err(&func->dev, "could not wake up chip\n"); - goto out_free; - } - - if (wl->fw == NULL) { - ret = wl1271_fetch_firmware(wl); - if (ret < 0) { - dev_err(&func->dev, "firmware fetch error\n"); - goto out_off; - } - } - - /* fetch NVS */ - if (wl->nvs == NULL) { - ret = wl1271_fetch_nvs(wl); - if (ret < 0) { - dev_err(&func->dev, "NVS fetch error\n"); - goto out_off; - } - } - - ret = wl1271_load_firmware(wl); - if (ret < 0) { - dev_err(&func->dev, "firmware load error: %d\n", ret); - goto out_free; - } - - dev_info(&func->dev, "initialized\n"); - - /* I/O testing will be done in the tester thread */ - - wl_test->test_task = kthread_run(tester, wl, "sdio_tester"); - if (IS_ERR(wl_test->test_task)) { - dev_err(&func->dev, "unable to create kernel thread\n"); - ret = PTR_ERR(wl_test->test_task); - goto out_free; - } - - return 0; - -out_off: - /* power off the chip */ - wl1271_power_off(wl); - -out_free: - kfree(wl_test); - return ret; -} - -static void __devexit wl1271_remove(struct sdio_func *func) -{ - struct wl1271_test *wl_test = sdio_get_drvdata(func); - - /* stop the I/O test thread */ - kthread_stop(wl_test->test_task); - - /* power off the chip */ - wl1271_power_off(&wl_test->wl); - - vfree(wl_test->wl.fw); - wl_test->wl.fw = NULL; - kfree(wl_test->wl.nvs); - wl_test->wl.nvs = NULL; - - kfree(wl_test); -} - -static struct sdio_driver wl1271_sdio_driver = { - .name = "wl12xx_sdio_test", - .id_table = wl1271_devices, - .probe = wl1271_probe, - .remove = __devexit_p(wl1271_remove), -}; - -static int __init wl1271_init(void) -{ - int ret; - - ret = sdio_register_driver(&wl1271_sdio_driver); - if (ret < 0) - pr_err("failed to register sdio driver: %d\n", ret); - - return ret; -} -module_init(wl1271_init); - -static void __exit wl1271_exit(void) -{ - sdio_unregister_driver(&wl1271_sdio_driver); -} -module_exit(wl1271_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>"); - diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c index 0f971867786..92caa7ce605 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/wl12xx/spi.c @@ -27,6 +27,7 @@ #include <linux/crc7.h> #include <linux/spi/spi.h> #include <linux/wl12xx.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include "wl12xx.h" @@ -69,35 +70,22 @@ #define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) -static inline struct spi_device *wl_to_spi(struct wl1271 *wl) -{ - return wl->if_priv; -} - -static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl) -{ - return &(wl_to_spi(wl)->dev); -} - -static void wl1271_spi_disable_interrupts(struct wl1271 *wl) -{ - disable_irq(wl->irq); -} - -static void wl1271_spi_enable_interrupts(struct wl1271 *wl) -{ - enable_irq(wl->irq); -} +struct wl12xx_spi_glue { + struct device *dev; + struct platform_device *core; +}; -static void wl1271_spi_reset(struct wl1271 *wl) +static void 
wl12xx_spi_reset(struct device *child) { + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); u8 *cmd; struct spi_transfer t; struct spi_message m; cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { - wl1271_error("could not allocate cmd for spi reset"); + dev_err(child->parent, + "could not allocate cmd for spi reset\n"); return; } @@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl) t.len = WSPI_INIT_CMD_LEN; spi_message_add_tail(&t, &m); - spi_sync(wl_to_spi(wl), &m); + spi_sync(to_spi_device(glue->dev), &m); - wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); kfree(cmd); } -static void wl1271_spi_init(struct wl1271 *wl) +static void wl12xx_spi_init(struct device *child) { + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; struct spi_transfer t; struct spi_message m; cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { - wl1271_error("could not allocate cmd for spi init"); + dev_err(child->parent, + "could not allocate cmd for spi init\n"); return; } @@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl) t.len = WSPI_INIT_CMD_LEN; spi_message_add_tail(&t, &m); - spi_sync(wl_to_spi(wl), &m); - wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); + spi_sync(to_spi_device(glue->dev), &m); kfree(cmd); } #define WL1271_BUSY_WORD_TIMEOUT 1000 -static int wl1271_spi_read_busy(struct wl1271 *wl) +static int wl12xx_spi_read_busy(struct device *child) { + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + struct wl1271 *wl = dev_get_drvdata(child); struct spi_transfer t[1]; struct spi_message m; u32 *busy_buf; @@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl) t[0].len = sizeof(u32); t[0].cs_change = true; spi_message_add_tail(&t[0], &m); - spi_sync(wl_to_spi(wl), &m); + spi_sync(to_spi_device(glue->dev), &m); if (*busy_buf & 0x1) return 0; } /* The SPI bus is unresponsive, the read failed. 
*/ - wl1271_error("SPI read busy-word timeout!\n"); + dev_err(child->parent, "SPI read busy-word timeout!\n"); return -ETIMEDOUT; } -static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, +static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, size_t len, bool fixed) { + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + struct wl1271 *wl = dev_get_drvdata(child); struct spi_transfer t[2]; struct spi_message m; u32 *busy_buf; @@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, t[1].cs_change = true; spi_message_add_tail(&t[1], &m); - spi_sync(wl_to_spi(wl), &m); + spi_sync(to_spi_device(glue->dev), &m); if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && - wl1271_spi_read_busy(wl)) { + wl12xx_spi_read_busy(child)) { memset(buf, 0, chunk_len); return; } @@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, t[0].cs_change = true; spi_message_add_tail(&t[0], &m); - spi_sync(wl_to_spi(wl), &m); - - wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); - wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len); + spi_sync(to_spi_device(glue->dev), &m); if (!fixed) addr += chunk_len; @@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, } } -static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, - size_t len, bool fixed) +static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf, + size_t len, bool fixed) { + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; struct spi_message m; u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; @@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, t[i].len = chunk_len; spi_message_add_tail(&t[i++], &m); - wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); - wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len); - if (!fixed) addr += chunk_len; buf += chunk_len; @@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, cmd++; } - spi_sync(wl_to_spi(wl), &m); -} - -static irqreturn_t wl1271_hardirq(int irq, void *cookie) -{ - struct wl1271 *wl = cookie; - unsigned long flags; - - wl1271_debug(DEBUG_IRQ, "IRQ"); - - /* complete the ELP completion */ - spin_lock_irqsave(&wl->wl_lock, flags); - set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - if (wl->elp_compl) { - complete(wl->elp_compl); - wl->elp_compl = NULL; - } - spin_unlock_irqrestore(&wl->wl_lock, flags); - - return IRQ_WAKE_THREAD; -} - -static int wl1271_spi_set_power(struct wl1271 *wl, bool enable) -{ - if (wl->set_power) - wl->set_power(enable); - - return 0; + spi_sync(to_spi_device(glue->dev), &m); } static struct wl1271_if_operations spi_ops = { - .read = wl1271_spi_raw_read, - .write = wl1271_spi_raw_write, - .reset = wl1271_spi_reset, - .init = wl1271_spi_init, - .power = wl1271_spi_set_power, - .dev = wl1271_spi_wl_to_dev, - .enable_irq = wl1271_spi_enable_interrupts, - .disable_irq = wl1271_spi_disable_interrupts, + .read = wl12xx_spi_raw_read, + .write = wl12xx_spi_raw_write, + .reset = wl12xx_spi_reset, + .init = wl12xx_spi_init, .set_block_size = NULL, }; static int __devinit wl1271_probe(struct spi_device *spi) { + struct wl12xx_spi_glue *glue; struct wl12xx_platform_data *pdata; - struct ieee80211_hw *hw; - struct wl1271 *wl; - unsigned long irqflags; - int ret; + struct resource res[1]; + int ret = -ENOMEM; pdata = 
spi->dev.platform_data; if (!pdata) { - wl1271_error("no platform data"); + dev_err(&spi->dev, "no platform data\n"); return -ENODEV; } - hw = wl1271_alloc_hw(); - if (IS_ERR(hw)) - return PTR_ERR(hw); + pdata->ops = &spi_ops; - wl = hw->priv; + glue = kzalloc(sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&spi->dev, "can't allocate glue\n"); + goto out; + } - dev_set_drvdata(&spi->dev, wl); - wl->if_priv = spi; + glue->dev = &spi->dev; - wl->if_ops = &spi_ops; + spi_set_drvdata(spi, glue); /* This is the only SPI value that we need to set here, the rest * comes from the board-peripherals file */ @@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi) ret = spi_setup(spi); if (ret < 0) { - wl1271_error("spi_setup failed"); - goto out_free; + dev_err(glue->dev, "spi_setup failed\n"); + goto out_free_glue; } - wl->set_power = pdata->set_power; - if (!wl->set_power) { - wl1271_error("set power function missing in platform data"); - ret = -ENODEV; - goto out_free; + glue->core = platform_device_alloc("wl12xx", -1); + if (!glue->core) { + dev_err(glue->dev, "can't allocate platform_device\n"); + ret = -ENOMEM; + goto out_free_glue; } - wl->ref_clock = pdata->board_ref_clock; - wl->tcxo_clock = pdata->board_tcxo_clock; - wl->platform_quirks = pdata->platform_quirks; + glue->core->dev.parent = &spi->dev; - if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) - irqflags = IRQF_TRIGGER_RISING; - else - irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; + memset(res, 0x00, sizeof(res)); - wl->irq = spi->irq; - if (wl->irq < 0) { - wl1271_error("irq missing in platform data"); - ret = -ENODEV; - goto out_free; - } + res[0].start = spi->irq; + res[0].flags = IORESOURCE_IRQ; + res[0].name = "irq"; - ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, - irqflags, - DRIVER_NAME, wl); - if (ret < 0) { - wl1271_error("request_irq() failed: %d", ret); - goto out_free; + ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); + if (ret) { + dev_err(glue->dev, "can't add resources\n"); + goto out_dev_put; } - disable_irq(wl->irq); - - ret = wl1271_init_ieee80211(wl); - if (ret) - goto out_irq; + ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata)); + if (ret) { + dev_err(glue->dev, "can't add platform data\n"); + goto out_dev_put; + } - ret = wl1271_register_hw(wl); - if (ret) - goto out_irq; + ret = platform_device_add(glue->core); + if (ret) { + dev_err(glue->dev, "can't register platform device\n"); + goto out_dev_put; + } return 0; - out_irq: - free_irq(wl->irq, wl); - - out_free: - wl1271_free_hw(wl); +out_dev_put: + platform_device_put(glue->core); +out_free_glue: + kfree(glue); +out: return ret; } static int __devexit wl1271_remove(struct spi_device *spi) { - struct wl1271 *wl = dev_get_drvdata(&spi->dev); + struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); - wl1271_unregister_hw(wl); - free_irq(wl->irq, wl); - wl1271_free_hw(wl); + platform_device_del(glue->core); + platform_device_put(glue->core); + kfree(glue); return 0; } @@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi) static struct spi_driver wl1271_spi_driver = { .driver = { .name = "wl1271_spi", - .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index 4ae8effaee2..25093c0cb0e 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c @@ -26,8 +26,10 @@ #include <net/genetlink.h> #include "wl12xx.h" +#include "debug.h" 
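/*
 * Illustrative sketch (editorial aside, not part of the original patch):
 * the bus-glue pattern that the sdio.c and spi.c conversions above
 * introduce. A thin glue driver keeps only the bus handle and registers
 * a "wl12xx" child platform device, handing over the IRQ as a resource
 * and the board data as platform data; the core driver then binds to
 * that child. Names such as example_glue and example_register_core are
 * hypothetical and exist only for this sketch.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/platform_device.h>

struct example_glue {
	struct device *dev;		/* the bus device (SDIO function / SPI device) */
	struct platform_device *core;	/* child device running the wl12xx core driver */
};

static int example_register_core(struct example_glue *glue, int irq,
				 void *pdata, size_t pdata_len)
{
	struct resource res[1];
	int ret;

	/* allocate the child platform device the core driver will bind to */
	glue->core = platform_device_alloc("wl12xx", -1);
	if (!glue->core)
		return -ENOMEM;

	glue->core->dev.parent = glue->dev;

	/* pass the wlan IRQ down as a platform resource */
	memset(res, 0, sizeof(res));
	res[0].start = irq;
	res[0].flags = IORESOURCE_IRQ;
	res[0].name = "irq";

	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
	if (ret)
		goto out_put;

	/* platform data is copied by the core, so a driver-local buffer is fine */
	ret = platform_device_add_data(glue->core, pdata, pdata_len);
	if (ret)
		goto out_put;

	ret = platform_device_add(glue->core);
	if (ret)
		goto out_put;

	return 0;

out_put:
	platform_device_put(glue->core);
	return ret;
}

/*
 * Teardown mirrors the probe path, as in the wl1271_remove() hunks above:
 * platform_device_del(glue->core); platform_device_put(glue->core);
 * kfree(glue);
 */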
#include "acx.h" #include "reg.h" +#include "ps.h" #define WL1271_TM_MAX_DATA_LENGTH 1024 @@ -36,6 +38,7 @@ enum wl1271_tm_commands { WL1271_TM_CMD_TEST, WL1271_TM_CMD_INTERROGATE, WL1271_TM_CMD_CONFIGURE, + WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */ WL1271_TM_CMD_SET_PLT_MODE, WL1271_TM_CMD_RECOVER, @@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) return -EMSGSIZE; mutex_lock(&wl->mutex); - ret = wl1271_cmd_test(wl, buf, buf_len, answer); - mutex_unlock(&wl->mutex); + if (wl->state == WL1271_STATE_OFF) { + ret = -EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_cmd_test(wl, buf, buf_len, answer); if (ret < 0) { wl1271_warning("testmode cmd test failed: %d", ret); - return ret; + goto out_sleep; } if (answer) { len = nla_total_size(buf_len); skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); - if (!skb) - return -ENOMEM; + if (!skb) { + ret = -ENOMEM; + goto out_sleep; + } NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); ret = cfg80211_testmode_reply(skb); if (ret < 0) - return ret; + goto out_sleep; } - return 0; +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; nla_put_failure: kfree_skb(skb); - return -EMSGSIZE; + ret = -EMSGSIZE; + goto out_sleep; } static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) @@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); + mutex_lock(&wl->mutex); + + if (wl->state == WL1271_STATE_OFF) { + ret = -EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + if (!cmd) { + ret = -ENOMEM; + goto out_sleep; + } - mutex_lock(&wl->mutex); ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd)); - mutex_unlock(&wl->mutex); - if (ret < 0) { wl1271_warning("testmode cmd interrogate failed: %d", ret); - kfree(cmd); - return ret; + goto out_free; } skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); if (!skb) { - kfree(cmd); - return -ENOMEM; + ret = -ENOMEM; + goto out_free; } NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); + ret = cfg80211_testmode_reply(skb); + if (ret < 0) + goto out_free; + +out_free: + kfree(cmd); +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); - return 0; + return ret; nla_put_failure: kfree_skb(skb); - return -EMSGSIZE; + ret = -EMSGSIZE; + goto out_free; } static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c index bad9e29d49b..4508ccd7832 100644 --- a/drivers/net/wireless/wl12xx/tx.c +++ b/drivers/net/wireless/wl12xx/tx.c @@ -26,22 +26,24 @@ #include <linux/etherdevice.h> #include "wl12xx.h" +#include "debug.h" #include "io.h" #include "reg.h" #include "ps.h" #include "tx.h" #include "event.h" -static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id) +static int wl1271_set_default_wep_key(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 id) { int ret; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); if (is_ap) ret = wl12xx_cmd_set_default_wep_key(wl, id, - wl->ap_bcast_hlid); + wlvif->ap.bcast_hlid); else - ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid); + ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); if (ret < 0) return 
ret; @@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id) } static int wl1271_tx_update_filters(struct wl1271 *wl, - struct sk_buff *skb) + struct wl12xx_vif *wlvif, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; int ret; @@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl, if (!ieee80211_is_auth(hdr->frame_control)) return 0; - if (wl->dev_hlid != WL12XX_INVALID_LINK_ID) + if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID) goto out; wl1271_debug(DEBUG_CMD, "starting device role for roaming"); - ret = wl12xx_cmd_role_start_dev(wl); - if (ret < 0) - goto out; - - ret = wl12xx_roc(wl, wl->dev_role_id); + ret = wl12xx_start_dev(wl, wlvif); if (ret < 0) goto out; out: @@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, wl1271_acx_set_inconnection_sta(wl, hdr->addr1); } -static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) +static void wl1271_tx_regulate_link(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 hlid) { bool fw_ps, single_sta; u8 tx_pkts; - /* only regulate station links */ - if (hlid < WL1271_AP_STA_HLID_START) + if (WARN_ON(!test_bit(hlid, wlvif->links_map))) return; - if (WARN_ON(!wl1271_is_active_sta(wl, hlid))) - return; - fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); tx_pkts = wl->links[hlid].allocated_pkts; single_sta = (wl->active_sta_count == 1); @@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) * case FW-memory congestion is not a problem. */ if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) - wl1271_ps_link_start(wl, hlid, true); + wl12xx_ps_link_start(wl, wlvif, hlid, true); } bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) @@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) return wl->dummy_packet == skb; } -u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb) +u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb) { struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); @@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb) } else { struct ieee80211_hdr *hdr; - if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) return wl->system_hlid; hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_mgmt(hdr->frame_control)) - return wl->ap_global_hlid; + return wlvif->ap.global_hlid; else - return wl->ap_bcast_hlid; + return wlvif->ap.bcast_hlid; } } -static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb) +u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - if (wl12xx_is_dummy_packet(wl, skb)) + if (!wlvif || wl12xx_is_dummy_packet(wl, skb)) return wl->system_hlid; - if (wl->bss_type == BSS_TYPE_AP_BSS) - return wl12xx_tx_get_hlid_ap(wl, skb); + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); - wl1271_tx_update_filters(wl, skb); + wl1271_tx_update_filters(wl, wlvif, skb); - if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || - test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) && + if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || + test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && !ieee80211_is_auth(hdr->frame_control) && !ieee80211_is_assoc_req(hdr->frame_control)) - return wl->sta_hlid; + return wlvif->sta.hlid; else - return wl->dev_hlid; + return 
wlvif->dev_hlid; } static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, unsigned int packet_length) { - if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) - return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); - else + if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT) return ALIGN(packet_length, WL1271_TX_ALIGN_TO); + else + return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); } -static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, - u32 buf_offset, u8 hlid) +static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u32 extra, u32 buf_offset, + u8 hlid) { struct wl1271_tx_hw_descr *desc; u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; @@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, u32 total_blocks; int id, ret = -EBUSY, ac; u32 spare_blocks = wl->tx_spare_blocks; + bool is_dummy = false; if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) return -EAGAIN; @@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, len = wl12xx_calc_packet_alignment(wl, total_len); /* in case of a dummy packet, use default amount of spare mem blocks */ - if (unlikely(wl12xx_is_dummy_packet(wl, skb))) + if (unlikely(wl12xx_is_dummy_packet(wl, skb))) { + is_dummy = true; spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; + } total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE + spare_blocks; @@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); wl->tx_allocated_pkts[ac]++; - if (wl->bss_type == BSS_TYPE_AP_BSS && - hlid >= WL1271_AP_STA_HLID_START) + if (!is_dummy && wlvif && + wlvif->bss_type == BSS_TYPE_AP_BSS && + test_bit(hlid, wlvif->ap.sta_hlid_map)) wl->links[hlid].allocated_pkts++; ret = 0; @@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, return ret; } -static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, - u32 extra, struct ieee80211_tx_info *control, - u8 hlid) +static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u32 extra, + struct ieee80211_tx_info *control, u8 hlid) { struct timespec ts; struct wl1271_tx_hw_descr *desc; int aligned_len, ac, rate_idx; s64 hosttime; - u16 tx_attr; + u16 tx_attr = 0; + bool is_dummy; desc = (struct wl1271_tx_hw_descr *) skb->data; @@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, hosttime = (timespec_to_ns(&ts) >> 10); desc->start_time = cpu_to_le32(hosttime - wl->time_offset); - if (wl->bss_type != BSS_TYPE_AP_BSS) + is_dummy = wl12xx_is_dummy_packet(wl, skb); + if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS) desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); else desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); @@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); desc->tid = skb->priority; - if (wl12xx_is_dummy_packet(wl, skb)) { + if (is_dummy) { /* * FW expects the dummy packet to have an invalid session id - * any session id that is different than the one set in the join */ - tx_attr = ((~wl->session_counter) << + tx_attr = (SESSION_COUNTER_INVALID << TX_HW_ATTR_OFST_SESSION_COUNTER) & TX_HW_ATTR_SESSION_COUNTER; tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; - } else { + } else if 
(wlvif) { /* configure the tx attributes */ - tx_attr = - wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; + tx_attr = wlvif->session_counter << + TX_HW_ATTR_OFST_SESSION_COUNTER; } desc->hlid = hlid; - - if (wl->bss_type != BSS_TYPE_AP_BSS) { + if (is_dummy || !wlvif) + rate_idx = 0; + else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { /* if the packets are destined for AP (have a STA entry) send them with AP rate policies, otherwise use default basic rates */ - if (control->control.sta) - rate_idx = ACX_TX_AP_FULL_RATE; + if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) + rate_idx = wlvif->sta.p2p_rate_idx; + else if (control->control.sta) + rate_idx = wlvif->sta.ap_rate_idx; else - rate_idx = ACX_TX_BASIC_RATE; + rate_idx = wlvif->sta.basic_rate_idx; } else { - if (hlid == wl->ap_global_hlid) - rate_idx = ACX_TX_AP_MODE_MGMT_RATE; - else if (hlid == wl->ap_bcast_hlid) - rate_idx = ACX_TX_AP_MODE_BCST_RATE; + if (hlid == wlvif->ap.global_hlid) + rate_idx = wlvif->ap.mgmt_rate_idx; + else if (hlid == wlvif->ap.bcast_hlid) + rate_idx = wlvif->ap.bcast_rate_idx; else - rate_idx = ac; + rate_idx = wlvif->ap.ucast_rate_idx[ac]; } tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; @@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, } /* caller must hold wl->mutex */ -static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, - u32 buf_offset) +static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u32 buf_offset) { struct ieee80211_tx_info *info; u32 extra = 0; int ret = 0; u32 total_len; u8 hlid; + bool is_dummy; if (!skb) return -EINVAL; info = IEEE80211_SKB_CB(skb); + /* TODO: handle dummy packets on multi-vifs */ + is_dummy = wl12xx_is_dummy_packet(wl, skb); + if (info->control.hw_key && info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) extra = WL1271_TKIP_IV_SPACE; @@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104); - if (unlikely(is_wep && wl->default_key != idx)) { - ret = wl1271_set_default_wep_key(wl, idx); + if (unlikely(is_wep && wlvif->default_key != idx)) { + ret = wl1271_set_default_wep_key(wl, wlvif, idx); if (ret < 0) return ret; - wl->default_key = idx; + wlvif->default_key = idx; } } - - hlid = wl1271_tx_get_hlid(wl, skb); + hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); if (hlid == WL12XX_INVALID_LINK_ID) { wl1271_error("invalid hlid. 
dropping skb 0x%p", skb); return -EINVAL; } - ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid); + ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid); if (ret < 0) return ret; - wl1271_tx_fill_hdr(wl, skb, extra, info, hlid); + wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid); - if (wl->bss_type == BSS_TYPE_AP_BSS) { + if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) { wl1271_tx_ap_update_inconnection_sta(wl, skb); - wl1271_tx_regulate_link(wl, hlid); + wl1271_tx_regulate_link(wl, wlvif, hlid); } /* @@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); /* Revert side effects in the dummy packet skb, so it can be reused */ - if (wl12xx_is_dummy_packet(wl, skb)) + if (is_dummy) skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); return total_len; @@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, return &queues[q]; } -static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl) +static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl, + struct wl1271_link *lnk) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; unsigned long flags; struct sk_buff_head *queue; - queue = wl1271_select_queue(wl, wl->tx_queue); + queue = wl1271_select_queue(wl, lnk->tx_queue); if (!queue) - goto out; + return NULL; skb = skb_dequeue(queue); - -out: if (skb) { int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); spin_lock_irqsave(&wl->wl_lock, flags); @@ -545,43 +556,33 @@ out: return skb; } -static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl) +static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, + struct wl12xx_vif *wlvif) { struct sk_buff *skb = NULL; - unsigned long flags; int i, h, start_hlid; - struct sk_buff_head *queue; /* start from the link after the last one */ - start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS; + start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS; /* dequeue according to AC, round robin on each link */ - for (i = 0; i < AP_MAX_LINKS; i++) { - h = (start_hlid + i) % AP_MAX_LINKS; + for (i = 0; i < WL12XX_MAX_LINKS; i++) { + h = (start_hlid + i) % WL12XX_MAX_LINKS; /* only consider connected stations */ - if (h >= WL1271_AP_STA_HLID_START && - !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map)) + if (!test_bit(h, wlvif->links_map)) continue; - queue = wl1271_select_queue(wl, wl->links[h].tx_queue); - if (!queue) + skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]); + if (!skb) continue; - skb = skb_dequeue(queue); - if (skb) - break; + wlvif->last_tx_hlid = h; + break; } - if (skb) { - int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); - wl->last_tx_hlid = h; - spin_lock_irqsave(&wl->wl_lock, flags); - wl->tx_queue_count[q]--; - spin_unlock_irqrestore(&wl->wl_lock, flags); - } else { - wl->last_tx_hlid = 0; - } + if (!skb) + wlvif->last_tx_hlid = 0; return skb; } @@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl) static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) { unsigned long flags; + struct wl12xx_vif *wlvif = wl->last_wlvif; struct sk_buff *skb = NULL; - if (wl->bss_type == BSS_TYPE_AP_BSS) - skb = wl1271_ap_skb_dequeue(wl); - else - skb = wl1271_sta_skb_dequeue(wl); + if (wlvif) { + wl12xx_for_each_wlvif_continue(wl, wlvif) { + skb = wl12xx_vif_skb_dequeue(wl, wlvif); + if (skb) { + wl->last_wlvif = wlvif; + break; + } + } + } + + /* do another pass */ + if (!skb) { + 
wl12xx_for_each_wlvif(wl, wlvif) { + skb = wl12xx_vif_skb_dequeue(wl, wlvif); + if (skb) { + wl->last_wlvif = wlvif; + break; + } + } + } + + if (!skb) + skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); if (!skb && test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { @@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) return skb; } -static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) +static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb) { unsigned long flags; int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); if (wl12xx_is_dummy_packet(wl, skb)) { set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); - } else if (wl->bss_type == BSS_TYPE_AP_BSS) { - u8 hlid = wl1271_tx_get_hlid(wl, skb); + } else { + u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); skb_queue_head(&wl->links[hlid].tx_queue[q], skb); /* make sure we dequeue the same packet next time */ - wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS; - } else { - skb_queue_head(&wl->tx_queue[q], skb); + wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) % + WL12XX_MAX_LINKS; } spin_lock_irqsave(&wl->wl_lock, flags); @@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb) return ieee80211_is_data_present(hdr->frame_control); } +void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) +{ + struct wl12xx_vif *wlvif; + u32 timeout; + u8 hlid; + + if (!wl->conf.rx_streaming.interval) + return; + + if (!wl->conf.rx_streaming.always && + !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) + return; + + timeout = wl->conf.rx_streaming.duration; + wl12xx_for_each_wlvif_sta(wl, wlvif) { + bool found = false; + for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) { + if (test_bit(hlid, wlvif->links_map)) { + found = true; + break; + } + } + + if (!found) + continue; + + /* enable rx streaming */ + if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) + ieee80211_queue_work(wl->hw, + &wlvif->rx_streaming_enable_work); + + mod_timer(&wlvif->rx_streaming_timer, + jiffies + msecs_to_jiffies(timeout)); + } +} + void wl1271_tx_work_locked(struct wl1271 *wl) { + struct wl12xx_vif *wlvif; struct sk_buff *skb; + struct wl1271_tx_hw_descr *desc; u32 buf_offset = 0; bool sent_packets = false; - bool had_data = false; - bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; int ret; if (unlikely(wl->state == WL1271_STATE_OFF)) return; while ((skb = wl1271_skb_dequeue(wl))) { - if (wl1271_tx_is_data_present(skb)) - had_data = true; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool has_data = false; - ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); + wlvif = NULL; + if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) + wlvif = wl12xx_vif_to_data(info->control.vif); + + has_data = wlvif && wl1271_tx_is_data_present(skb); + ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); if (ret == -EAGAIN) { /* * Aggregation buffer is full. * Flush buffer and try again. */ - wl1271_skb_queue_head(wl, skb); + wl1271_skb_queue_head(wl, wlvif, skb); wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, buf_offset, true); sent_packets = true; @@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl) * Firmware buffer is full. * Queue back last skb, and stop aggregating. 
*/ - wl1271_skb_queue_head(wl, skb); + wl1271_skb_queue_head(wl, wlvif, skb); /* No work left, avoid scheduling redundant tx work */ set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); goto out_ack; } else if (ret < 0) { - dev_kfree_skb(skb); + if (wl12xx_is_dummy_packet(wl, skb)) + /* + * fw still expects dummy packet, + * so re-enqueue it + */ + wl1271_skb_queue_head(wl, wlvif, skb); + else + ieee80211_free_txskb(wl->hw, skb); goto out_ack; } buf_offset += ret; wl->tx_packets_count++; + if (has_data) { + desc = (struct wl1271_tx_hw_descr *) skb->data; + __set_bit(desc->hlid, active_hlids); + } } out_ack: @@ -701,19 +775,7 @@ out_ack: wl1271_handle_tx_low_watermark(wl); } - if (!is_ap && wl->conf.rx_streaming.interval && had_data && - (wl->conf.rx_streaming.always || - test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { - u32 timeout = wl->conf.rx_streaming.duration; - - /* enable rx streaming */ - if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) - ieee80211_queue_work(wl->hw, - &wl->rx_streaming_enable_work); - - mod_timer(&wl->rx_streaming_timer, - jiffies + msecs_to_jiffies(timeout)); - } + wl12xx_rearm_rx_streaming(wl, active_hlids); } void wl1271_tx_work(struct work_struct *work) @@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, struct wl1271_tx_hw_res_descr *result) { struct ieee80211_tx_info *info; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; struct sk_buff *skb; int id = result->id; int rate = -1; @@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, return; } + /* info->control is valid as long as we don't update info->status */ + vif = info->control.vif; + wlvif = wl12xx_vif_to_data(vif); + /* update the TX status info */ if (result->status == TX_SUCCESS) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; - rate = wl1271_rate_to_idx(result->rate_class_index, wl->band); + rate = wl1271_rate_to_idx(result->rate_class_index, + wlvif->band); retries = result->ack_failures; } else if (result->status == TX_RETRY_EXCEEDED) { wl->stats.excessive_retries++; @@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP || info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) { u8 fw_lsb = result->tx_security_sequence_number_lsb; - u8 cur_lsb = wl->tx_security_last_seq_lsb; + u8 cur_lsb = wlvif->tx_security_last_seq_lsb; /* * update security sequence number, taking care of potential * wrap-around */ - wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256; - wl->tx_security_last_seq_lsb = fw_lsb; + wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff; + wlvif->tx_security_last_seq_lsb = fw_lsb; } /* remove private header from packet */ @@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) } /* caller must hold wl->mutex and TX must be stopped */ -void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues) +void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int i; - struct sk_buff *skb; - struct ieee80211_tx_info *info; /* TX failure */ - if (wl->bss_type == BSS_TYPE_AP_BSS) { - for (i = 0; i < AP_MAX_LINKS; i++) { - wl1271_free_sta(wl, i); - wl1271_tx_reset_link_queues(wl, i); - wl->links[i].allocated_pkts = 0; - wl->links[i].prev_freed_pkts = 0; - } - - wl->last_tx_hlid = 0; - } else { - for (i = 0; i < NUM_TX_QUEUES; i++) { - while ((skb = skb_dequeue(&wl->tx_queue[i]))) { - wl1271_debug(DEBUG_TX, "freeing skb 0x%p", - skb); - - if (!wl12xx_is_dummy_packet(wl, 
skb)) { - info = IEEE80211_SKB_CB(skb); - info->status.rates[0].idx = -1; - info->status.rates[0].count = 0; - ieee80211_tx_status_ni(wl->hw, skb); - } - } - } + for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + wl1271_free_sta(wl, wlvif, i); + else + wlvif->sta.ba_rx_bitmap = 0; - wl->ba_rx_bitmap = 0; + wl1271_tx_reset_link_queues(wl, i); + wl->links[i].allocated_pkts = 0; + wl->links[i].prev_freed_pkts = 0; } + wlvif->last_tx_hlid = 0; + +} +/* caller must hold wl->mutex and TX must be stopped */ +void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues) +{ + int i; + struct sk_buff *skb; + struct ieee80211_tx_info *info; for (i = 0; i < NUM_TX_QUEUES; i++) wl->tx_queue_count[i] = 0; diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h index dc4f09adf08..2dbb24e6d54 100644 --- a/drivers/net/wireless/wl12xx/tx.h +++ b/drivers/net/wireless/wl12xx/tx.h @@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl) void wl1271_tx_work(struct work_struct *work); void wl1271_tx_work_locked(struct wl1271 *wl); void wl1271_tx_complete(struct wl1271 *wl); -void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues); +void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); void wl1271_tx_flush(struct wl1271 *wl); u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, enum ieee80211_band rate_band); u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); -u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb); +u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb); +u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb); void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); void wl1271_handle_tx_low_watermark(struct wl1271 *wl); bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); +void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); /* from main.c */ -void wl1271_free_sta(struct wl1271 *wl, u8 hlid); +void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); #endif diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h index 1ec90fc7505..b2b09cd0202 100644 --- a/drivers/net/wireless/wl12xx/wl12xx.h +++ b/drivers/net/wireless/wl12xx/wl12xx.h @@ -35,83 +35,6 @@ #include "conf.h" #include "ini.h" -#define DRIVER_NAME "wl1271" -#define DRIVER_PREFIX DRIVER_NAME ": " - -/* - * FW versions support BA 11n - * versions marks x.x.x.50-60.x - */ -#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50 -#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60 - -enum { - DEBUG_NONE = 0, - DEBUG_IRQ = BIT(0), - DEBUG_SPI = BIT(1), - DEBUG_BOOT = BIT(2), - DEBUG_MAILBOX = BIT(3), - DEBUG_TESTMODE = BIT(4), - DEBUG_EVENT = BIT(5), - DEBUG_TX = BIT(6), - DEBUG_RX = BIT(7), - DEBUG_SCAN = BIT(8), - DEBUG_CRYPT = BIT(9), - DEBUG_PSM = BIT(10), - DEBUG_MAC80211 = BIT(11), - DEBUG_CMD = BIT(12), - DEBUG_ACX = BIT(13), - DEBUG_SDIO = BIT(14), - DEBUG_FILTERS = BIT(15), - DEBUG_ADHOC = BIT(16), - DEBUG_AP = BIT(17), - DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), - DEBUG_ALL = ~0, -}; - -extern u32 wl12xx_debug_level; - -#define DEBUG_DUMP_LIMIT 1024 - -#define wl1271_error(fmt, arg...) \ - pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) - -#define wl1271_warning(fmt, arg...) 
\ - pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) - -#define wl1271_notice(fmt, arg...) \ - pr_info(DRIVER_PREFIX fmt "\n", ##arg) - -#define wl1271_info(fmt, arg...) \ - pr_info(DRIVER_PREFIX fmt "\n", ##arg) - -#define wl1271_debug(level, fmt, arg...) \ - do { \ - if (level & wl12xx_debug_level) \ - pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ - } while (0) - -/* TODO: use pr_debug_hex_dump when it will be available */ -#define wl1271_dump(level, prefix, buf, len) \ - do { \ - if (level & wl12xx_debug_level) \ - print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ - DUMP_PREFIX_OFFSET, 16, 1, \ - buf, \ - min_t(size_t, len, DEBUG_DUMP_LIMIT), \ - 0); \ - } while (0) - -#define wl1271_dump_ascii(level, prefix, buf, len) \ - do { \ - if (level & wl12xx_debug_level) \ - print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ - DUMP_PREFIX_OFFSET, 16, 1, \ - buf, \ - min_t(size_t, len, DEBUG_DUMP_LIMIT), \ - true); \ - } while (0) - #define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin" #define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin" @@ -142,16 +65,12 @@ extern u32 wl12xx_debug_level; #define WL12XX_INVALID_ROLE_ID 0xff #define WL12XX_INVALID_LINK_ID 0xff +#define WL12XX_MAX_RATE_POLICIES 16 + /* Defined by FW as 0. Will not be freed or allocated. */ #define WL12XX_SYSTEM_HLID 0 /* - * TODO: we currently don't support multirole. remove - * this constant from the code when we do. - */ -#define WL1271_AP_STA_HLID_START 3 - -/* * When in AP-mode, we allow (at least) this number of packets * to be transmitted to FW for a STA in PS-mode. Only when packets are * present in the FW buffers it will wake the sleeping STA. We want to put @@ -236,13 +155,6 @@ struct wl1271_stats { #define AP_MAX_STATIONS 8 -/* Broadcast and Global links + system link + links to stations */ -/* - * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all - * the places that use this. 
- */ -#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START) - /* FW status registers */ struct wl12xx_fw_status { __le32 intr; @@ -299,17 +211,14 @@ struct wl1271_scan { }; struct wl1271_if_operations { - void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len, + void (*read)(struct device *child, int addr, void *buf, size_t len, bool fixed); - void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len, + void (*write)(struct device *child, int addr, void *buf, size_t len, bool fixed); - void (*reset)(struct wl1271 *wl); - void (*init)(struct wl1271 *wl); - int (*power)(struct wl1271 *wl, bool enable); - struct device* (*dev)(struct wl1271 *wl); - void (*enable_irq)(struct wl1271 *wl); - void (*disable_irq)(struct wl1271 *wl); - void (*set_block_size) (struct wl1271 *wl, unsigned int blksz); + void (*reset)(struct device *child); + void (*init)(struct device *child); + int (*power)(struct device *child, bool enable); + void (*set_block_size) (struct device *child, unsigned int blksz); }; #define MAX_NUM_KEYS 14 @@ -326,29 +235,33 @@ struct wl1271_ap_key { }; enum wl12xx_flags { - WL1271_FLAG_STA_ASSOCIATED, - WL1271_FLAG_IBSS_JOINED, WL1271_FLAG_GPIO_POWER, WL1271_FLAG_TX_QUEUE_STOPPED, WL1271_FLAG_TX_PENDING, WL1271_FLAG_IN_ELP, WL1271_FLAG_ELP_REQUESTED, - WL1271_FLAG_PSM, - WL1271_FLAG_PSM_REQUESTED, WL1271_FLAG_IRQ_RUNNING, - WL1271_FLAG_IDLE, - WL1271_FLAG_PSPOLL_FAILURE, - WL1271_FLAG_STA_STATE_SENT, WL1271_FLAG_FW_TX_BUSY, - WL1271_FLAG_AP_STARTED, - WL1271_FLAG_IF_INITIALIZED, WL1271_FLAG_DUMMY_PACKET_PENDING, WL1271_FLAG_SUSPENDED, WL1271_FLAG_PENDING_WORK, WL1271_FLAG_SOFT_GEMINI, - WL1271_FLAG_RX_STREAMING_STARTED, WL1271_FLAG_RECOVERY_IN_PROGRESS, - WL1271_FLAG_CS_PROGRESS, +}; + +enum wl12xx_vif_flags { + WLVIF_FLAG_INITIALIZED, + WLVIF_FLAG_STA_ASSOCIATED, + WLVIF_FLAG_IBSS_JOINED, + WLVIF_FLAG_AP_STARTED, + WLVIF_FLAG_PSM, + WLVIF_FLAG_PSM_REQUESTED, + WLVIF_FLAG_STA_STATE_SENT, + WLVIF_FLAG_RX_STREAMING_STARTED, + WLVIF_FLAG_PSPOLL_FAILURE, + WLVIF_FLAG_CS_PROGRESS, + WLVIF_FLAG_AP_PROBE_RESP_SET, + WLVIF_FLAG_IN_USE, }; struct wl1271_link { @@ -366,10 +279,11 @@ struct wl1271_link { }; struct wl1271 { - struct platform_device *plat_dev; struct ieee80211_hw *hw; bool mac80211_registered; + struct device *dev; + void *if_priv; struct wl1271_if_operations *if_ops; @@ -399,25 +313,20 @@ struct wl1271 { s8 hw_pg_ver; - u8 bssid[ETH_ALEN]; u8 mac_addr[ETH_ALEN]; - u8 bss_type; - u8 set_bss_type; - u8 p2p; /* we are using p2p role */ - u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 ssid_len; int channel; - u8 role_id; - u8 dev_role_id; u8 system_hlid; - u8 sta_hlid; - u8 dev_hlid; - u8 ap_global_hlid; - u8 ap_bcast_hlid; unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; + unsigned long rate_policies_map[ + BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; + + struct list_head wlvif_list; + + u8 sta_count; + u8 ap_count; struct wl1271_acx_mem_map *target_mem_map; @@ -440,11 +349,7 @@ struct wl1271 { /* Time-offset between host and chipset clocks */ s64 time_offset; - /* Session counter for the chipset */ - int session_counter; - /* Frames scheduled for transmission, not handled yet */ - struct sk_buff_head tx_queue[NUM_TX_QUEUES]; int tx_queue_count[NUM_TX_QUEUES]; long stopped_queues_map; @@ -462,17 +367,6 @@ struct wl1271 { struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; int tx_frames_cnt; - /* - * Security sequence number - * bits 0-15: lower 16 
bits part of sequence number - * bits 16-47: higher 32 bits part of sequence number - * bits 48-63: not in use - */ - u64 tx_security_seq; - - /* 8 bits of the last sequence number in use */ - u8 tx_security_last_seq_lsb; - /* FW Rx counter */ u32 rx_counter; @@ -507,59 +401,21 @@ struct wl1271 { u32 mbox_ptr[2]; /* Are we currently scanning */ + struct ieee80211_vif *scan_vif; struct wl1271_scan scan; struct delayed_work scan_complete_work; bool sched_scanning; - /* probe-req template for the current AP */ - struct sk_buff *probereq; - - /* Our association ID */ - u16 aid; - - /* - * currently configured rate set: - * bits 0-15 - 802.11abg rates - * bits 16-23 - 802.11n MCS index mask - * support only 1 stream, thus only 8 bits for the MCS rates (0-7). - */ - u32 basic_rate_set; - u32 basic_rate; - u32 rate_set; - u32 bitrate_masks[IEEE80211_NUM_BANDS]; - /* The current band */ enum ieee80211_band band; - /* Beaconing interval (needed for ad-hoc) */ - u32 beacon_int; - - /* Default key (for WEP) */ - u32 default_key; - - /* Rx Streaming */ - struct work_struct rx_streaming_enable_work; - struct work_struct rx_streaming_disable_work; - struct timer_list rx_streaming_timer; - struct completion *elp_compl; - struct completion *ps_compl; struct delayed_work elp_work; - struct delayed_work pspoll_work; - - /* counter for ps-poll delivery failures */ - int ps_poll_failures; - - /* retry counter for PSM entries */ - u8 psm_entry_retry; /* in dBm */ int power_level; - int rssi_thold; - int last_rssi_event; - struct wl1271_stats stats; __le32 buffer_32; @@ -583,20 +439,9 @@ struct wl1271 { /* Most recently reported noise in dBm */ s8 noise; - /* map for HLIDs of associated stations - when operating in AP mode */ - unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)]; - - /* recoreded keys for AP-mode - set here before AP startup */ - struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS]; - /* bands supported by this instance of wl12xx */ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; - /* RX BA constraint value */ - bool ba_support; - u8 ba_rx_bitmap; - bool ba_allowed; - int tcxo_clock; /* @@ -610,10 +455,7 @@ struct wl1271 { * AP-mode - links indexed by HLID. The global and broadcast links * are always active. 
*/ - struct wl1271_link links[AP_MAX_LINKS]; - - /* the hlid of the link where the last transmitted skb came from */ - int last_tx_hlid; + struct wl1271_link links[WL12XX_MAX_LINKS]; /* AP-mode - a bitmap of links currently in PS mode according to FW */ u32 ap_fw_ps_map; @@ -632,21 +474,173 @@ struct wl1271 { /* AP-mode - number of currently connected stations */ int active_sta_count; + + /* last wlvif we transmitted from */ + struct wl12xx_vif *last_wlvif; }; struct wl1271_station { u8 hlid; }; +struct wl12xx_vif { + struct wl1271 *wl; + struct list_head list; + unsigned long flags; + u8 bss_type; + u8 p2p; /* we are using p2p role */ + u8 role_id; + + /* sta/ibss specific */ + u8 dev_role_id; + u8 dev_hlid; + + union { + struct { + u8 hlid; + u8 ba_rx_bitmap; + + u8 basic_rate_idx; + u8 ap_rate_idx; + u8 p2p_rate_idx; + } sta; + struct { + u8 global_hlid; + u8 bcast_hlid; + + /* HLIDs bitmap of associated stations */ + unsigned long sta_hlid_map[BITS_TO_LONGS( + WL12XX_MAX_LINKS)]; + + /* recoreded keys - set here before AP startup */ + struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS]; + + u8 mgmt_rate_idx; + u8 bcast_rate_idx; + u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT]; + } ap; + }; + + /* the hlid of the last transmitted skb */ + int last_tx_hlid; + + unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; + + u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 ssid_len; + + /* The current band */ + enum ieee80211_band band; + int channel; + + u32 bitrate_masks[IEEE80211_NUM_BANDS]; + u32 basic_rate_set; + + /* + * currently configured rate set: + * bits 0-15 - 802.11abg rates + * bits 16-23 - 802.11n MCS index mask + * support only 1 stream, thus only 8 bits for the MCS rates (0-7). + */ + u32 basic_rate; + u32 rate_set; + + /* probe-req template for the current AP */ + struct sk_buff *probereq; + + /* Beaconing interval (needed for ad-hoc) */ + u32 beacon_int; + + /* Default key (for WEP) */ + u32 default_key; + + /* Our association ID */ + u16 aid; + + /* Session counter for the chipset */ + int session_counter; + + struct completion *ps_compl; + struct delayed_work pspoll_work; + + /* counter for ps-poll delivery failures */ + int ps_poll_failures; + + /* retry counter for PSM entries */ + u8 psm_entry_retry; + + /* in dBm */ + int power_level; + + int rssi_thold; + int last_rssi_event; + + /* RX BA constraint value */ + bool ba_support; + bool ba_allowed; + + /* Rx Streaming */ + struct work_struct rx_streaming_enable_work; + struct work_struct rx_streaming_disable_work; + struct timer_list rx_streaming_timer; + + /* + * This struct must be last! + * data that has to be saved acrossed reconfigs (e.g. recovery) + * should be declared in this struct. 
+ */ + struct { + u8 persistent[0]; + /* + * Security sequence number + * bits 0-15: lower 16 bits part of sequence number + * bits 16-47: higher 32 bits part of sequence number + * bits 48-63: not in use + */ + u64 tx_security_seq; + + /* 8 bits of the last sequence number in use */ + u8 tx_security_last_seq_lsb; + }; +}; + +static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif) +{ + return (struct wl12xx_vif *)vif->drv_priv; +} + +static inline +struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif) +{ + return container_of((void *)wlvif, struct ieee80211_vif, drv_priv); +} + +#define wl12xx_for_each_wlvif(wl, wlvif) \ + list_for_each_entry(wlvif, &wl->wlvif_list, list) + +#define wl12xx_for_each_wlvif_continue(wl, wlvif) \ + list_for_each_entry_continue(wlvif, &wl->wlvif_list, list) + +#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \ + wl12xx_for_each_wlvif(wl, wlvif) \ + if (wlvif->bss_type == _bss_type) + +#define wl12xx_for_each_wlvif_sta(wl, wlvif) \ + wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS) + +#define wl12xx_for_each_wlvif_ap(wl, wlvif) \ + wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS) + int wl1271_plt_start(struct wl1271 *wl); int wl1271_plt_stop(struct wl1271 *wl); -int wl1271_recalc_rx_streaming(struct wl1271 *wl); +int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif); void wl12xx_queue_recovery_work(struct wl1271 *wl); size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); #define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ -#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */ +#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */ +#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */ #define WL1271_DEFAULT_POWER_LEVEL 0 @@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); /* Each RX/TX transaction requires an end-of-transaction transfer */ #define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) -/* WL128X requires aggregated packets to be aligned to the SDIO block size */ -#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2) +/* wl127x and SPI don't support SDIO block size alignment */ +#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2) /* Older firmwares did not implement the FW logger over bus feature */ #define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h index f7971d3b089..8f0ffaf6230 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_80211.h +++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h @@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template { u8 ta[ETH_ALEN]; } __packed; -struct wl12xx_qos_null_data_template { - struct ieee80211_header header; - __le16 qos_ctl; -} __packed; - struct wl12xx_arp_rsp_template { struct ieee80211_hdr_3addr hdr; diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c index 973b11060a8..998e95895f9 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c +++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c @@ -1,8 +1,29 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2010-2011 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + #include <linux/module.h> #include <linux/err.h> #include <linux/wl12xx.h> -static const struct wl12xx_platform_data *platform_data; +static struct wl12xx_platform_data *platform_data; int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) { @@ -18,7 +39,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) return 0; } -const struct wl12xx_platform_data *wl12xx_get_platform_data(void) +struct wl12xx_platform_data *wl12xx_get_platform_data(void) { if (!platform_data) return ERR_PTR(-ENODEV); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 182562952c7..b7d41f8c338 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu) return 0; } -static u32 xenvif_fix_features(struct net_device *dev, u32 features) +static netdev_features_t xenvif_fix_features(struct net_device *dev, + netdev_features_t features) { struct xenvif *vif = netdev_priv(dev); @@ -222,7 +223,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) } } -static struct ethtool_ops xenvif_ethtool_ops = { +static const struct ethtool_ops xenvif_ethtool_ops = { .get_link = ethtool_op_get_link, .get_sset_count = xenvif_get_sset_count, @@ -230,7 +231,7 @@ static struct ethtool_ops xenvif_ethtool_ops = { .get_strings = xenvif_get_strings, }; -static struct net_device_ops xenvif_netdev_ops = { +static const struct net_device_ops xenvif_netdev_ops = { .ndo_start_xmit = xenvif_start_xmit, .ndo_get_stats = xenvif_get_stats, .ndo_open = xenvif_open, diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 15e332d08c8..639cf8ab62b 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, struct gnttab_copy *copy_gop; struct netbk_rx_meta *meta; /* - * These variables a used iff get_page_ext returns true, + * These variables are used iff get_page_ext returns true, * in which case they are guaranteed to be initialized. 
*/ unsigned int uninitialized_var(group), uninitialized_var(idx); @@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, if (!page) return NULL; - netbk->mmap_pages[pending_idx] = page; - gop->source.u.ref = txp->gref; gop->source.domid = vif->domid; gop->source.offset = txp->offset; @@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) continue; } - netbk->mmap_pages[pending_idx] = page; - gop->source.u.ref = txreq.gref; gop->source.domid = vif->domid; gop->source.offset = txreq.offset; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 226faab2360..0a59c57864f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev); #define xennet_sysfs_delif(dev) do { } while (0) #endif -static int xennet_can_sg(struct net_device *dev) +static bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } @@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev) gnttab_free_grant_references(np->gref_rx_head); } -static u32 xennet_fix_features(struct net_device *dev, u32 features) +static netdev_features_t xennet_fix_features(struct net_device *dev, + netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); int val; @@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features) return features; } -static int xennet_set_features(struct net_device *dev, u32 features) +static int xennet_set_features(struct net_device *dev, + netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); @@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev, case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: - case XenbusStateConnected: case XenbusStateUnknown: case XenbusStateClosed: break; @@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev, if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateConnected: netif_notify_peers(netdev); break; diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 7bcb1febef0..b8b6c2abbd4 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -72,6 +72,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table); #define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A #define PN533_CMD_IN_ATR 0x50 #define PN533_CMD_IN_RELEASE 0x52 +#define PN533_CMD_IN_JUMP_FOR_DEP 0x56 #define PN533_CMD_RESPONSE(cmd) (cmd + 1) @@ -231,6 +232,26 @@ struct pn533_cmd_activate_response { u8 gt[]; } __packed; +/* PN533_CMD_IN_JUMP_FOR_DEP */ +struct pn533_cmd_jump_dep { + u8 active; + u8 baud; + u8 next; + u8 gt[]; +} __packed; + +struct pn533_cmd_jump_dep_response { + u8 status; + u8 tg; + u8 nfcid3t[10]; + u8 didt; + u8 bst; + u8 brt; + u8 to; + u8 ppt; + /* optional */ + u8 gt[]; +} __packed; struct pn533 { struct usb_device *udev; @@ -1121,6 +1142,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev) { struct pn533_cmd_activate_param param; struct pn533_cmd_activate_response *resp; + u16 gt_len; int rc; nfc_dev_dbg(&dev->interface->dev, "%s", __func__); @@ -1146,7 +1168,11 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev) if (rc != PN533_CMD_RET_SUCCESS) return -EIO; - return 0; + /* ATR_RES general bytes are located at offset 16 */ + gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16; + rc = 
nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len); + + return rc; } static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx, @@ -1239,6 +1265,142 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) return; } + +static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, + u8 *params, int params_len) +{ + struct pn533_cmd_jump_dep *cmd; + struct pn533_cmd_jump_dep_response *resp; + struct nfc_target nfc_target; + u8 target_gt_len; + int rc; + + if (params_len == -ENOENT) { + nfc_dev_dbg(&dev->interface->dev, ""); + return 0; + } + + if (params_len < 0) { + nfc_dev_err(&dev->interface->dev, + "Error %d when bringing DEP link up", + params_len); + return 0; + } + + if (dev->tgt_available_prots && + !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) { + nfc_dev_err(&dev->interface->dev, + "The target does not support DEP"); + return -EINVAL; + } + + resp = (struct pn533_cmd_jump_dep_response *) params; + cmd = (struct pn533_cmd_jump_dep *) arg; + rc = resp->status & PN533_CMD_RET_MASK; + if (rc != PN533_CMD_RET_SUCCESS) { + nfc_dev_err(&dev->interface->dev, + "Bringing DEP link up failed %d", rc); + return 0; + } + + if (!dev->tgt_available_prots) { + nfc_dev_dbg(&dev->interface->dev, "Creating new target"); + + nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; + rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1); + if (rc) + return 0; + + dev->tgt_available_prots = 0; + } + + dev->tgt_active_prot = NFC_PROTO_NFC_DEP; + + /* ATR_RES general bytes are located at offset 17 */ + target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17; + rc = nfc_set_remote_general_bytes(dev->nfc_dev, + resp->gt, target_gt_len); + if (rc == 0) + rc = nfc_dep_link_is_up(dev->nfc_dev, + dev->nfc_dev->targets[0].idx, + !cmd->active, NFC_RF_INITIATOR); + + return 0; +} + +static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx, + u8 comm_mode, u8 rf_mode) +{ + struct pn533 *dev = nfc_get_drvdata(nfc_dev); + struct pn533_cmd_jump_dep *cmd; + u8 cmd_len, local_gt_len, *local_gt; + int rc; + + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + + if (rf_mode == NFC_RF_TARGET) { + nfc_dev_err(&dev->interface->dev, "Target mode not supported"); + return -EOPNOTSUPP; + } + + + if (dev->poll_mod_count) { + nfc_dev_err(&dev->interface->dev, + "Cannot bring the DEP link up while polling"); + return -EBUSY; + } + + if (dev->tgt_active_prot) { + nfc_dev_err(&dev->interface->dev, + "There is already an active target"); + return -EBUSY; + } + + local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len); + if (local_gt_len > NFC_MAX_GT_LEN) + return -EINVAL; + + cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len; + cmd = kzalloc(cmd_len, GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP); + + cmd->active = !comm_mode; + cmd->baud = 0; + if (local_gt != NULL) { + cmd->next = 4; /* We have some Gi */ + memcpy(cmd->gt, local_gt, local_gt_len); + } else { + cmd->next = 0; + } + + memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len); + dev->out_frame->datalen += cmd_len; + + pn533_tx_frame_finish(dev->out_frame); + + rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, + dev->in_maxlen, pn533_in_dep_link_up_complete, + cmd, GFP_KERNEL); + if (rc) + goto out; + + +out: + kfree(cmd); + + return rc; +} + +static int pn533_dep_link_down(struct nfc_dev *nfc_dev) +{ + pn533_deactivate_target(nfc_dev, 0); + + return 0; +} + #define 
PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3) #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 @@ -1339,7 +1501,7 @@ error: return 0; } -int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, +static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) @@ -1368,7 +1530,7 @@ int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, PN533_CMD_DATAEXCH_DATA_MAXLEN + PN533_FRAME_TAIL_SIZE; - skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL); + skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL); if (!skb_resp) { rc = -ENOMEM; goto error; @@ -1434,6 +1596,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, struct nfc_ops pn533_nfc_ops = { .dev_up = NULL, .dev_down = NULL, + .dep_link_up = pn533_dep_link_up, + .dep_link_down = pn533_dep_link_down, .start_poll = pn533_start_poll, .stop_poll = pn533_stop_poll, .activate_target = pn533_activate_target, diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 94f49ffa70b..8af868bab20 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -263,6 +263,11 @@ error: return PTR_ERR(vqs[i]); } +static const char *kvm_bus_name(struct virtio_device *vdev) +{ + return ""; +} + /* * The config ops structure as defined by virtio config */ @@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { .reset = kvm_reset, .find_vqs = kvm_find_vqs, .del_vqs = kvm_del_vqs, + .bus_name = kvm_bus_name, }; /* diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index b6a6356d09b..8160591913f 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -63,6 +63,7 @@ #include <asm/io.h> #include <asm/uaccess.h> +#include <asm/ebcdic.h> #include <net/iucv/iucv.h> #include "fsm.h" @@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); * Debug Facility stuff */ #define IUCV_DBF_SETUP_NAME "iucv_setup" -#define IUCV_DBF_SETUP_LEN 32 +#define IUCV_DBF_SETUP_LEN 64 #define IUCV_DBF_SETUP_PAGES 2 #define IUCV_DBF_SETUP_NR_AREAS 1 #define IUCV_DBF_SETUP_LEVEL 3 @@ -226,6 +227,7 @@ struct iucv_connection { struct net_device *netdev; struct connection_profile prof; char userid[9]; + char userdata[17]; }; /** @@ -263,7 +265,7 @@ struct ll_header { }; #define NETIUCV_HDRLEN (sizeof(struct ll_header)) -#define NETIUCV_BUFSIZE_MAX 32768 +#define NETIUCV_BUFSIZE_MAX 65537 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) #define NETIUCV_MTU_DEFAULT 9216 @@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev) return test_and_set_bit(0, &priv->tbusy); } -static u8 iucvMagic[16] = { +static u8 iucvMagic_ascii[16] = { + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 +}; + +static u8 iucvMagic_ebcdic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; @@ -301,18 +308,38 @@ static u8 iucvMagic[16] = { * * @returns The printable string (static data!!) 
*/ -static char *netiucv_printname(char *name) +static char *netiucv_printname(char *name, int len) { - static char tmp[9]; + static char tmp[17]; char *p = tmp; - memcpy(tmp, name, 8); - tmp[8] = '\0'; - while (*p && (!isspace(*p))) + memcpy(tmp, name, len); + tmp[len] = '\0'; + while (*p && ((p - tmp) < len) && (!isspace(*p))) p++; *p = '\0'; return tmp; } +static char *netiucv_printuser(struct iucv_connection *conn) +{ + static char tmp_uid[9]; + static char tmp_udat[17]; + static char buf[100]; + + if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { + tmp_uid[8] = '\0'; + tmp_udat[16] = '\0'; + memcpy(tmp_uid, conn->userid, 8); + memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8); + memcpy(tmp_udat, conn->userdata, 16); + EBCASC(tmp_udat, 16); + memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); + sprintf(buf, "%s.%s", tmp_uid, tmp_udat); + return buf; + } else + return netiucv_printname(conn->userid, 8); +} + /** * States of the interface statemachine. */ @@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path, { struct iucv_connection *conn = path->private; struct iucv_event ev; + static char tmp_user[9]; + static char tmp_udat[17]; int rc; - if (memcmp(iucvMagic, ipuser, 16)) - /* ipuser must match iucvMagic. */ - return -EINVAL; rc = -EINVAL; + memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8); + memcpy(tmp_udat, ipuser, 16); + EBCASC(tmp_udat, 16); read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(conn, &iucv_connection_list, list) { - if (strncmp(ipvmid, conn->userid, 8)) + if (strncmp(ipvmid, conn->userid, 8) || + strncmp(ipuser, conn->userdata, 16)) continue; /* Found a matching connection for this path. */ conn->path = path; @@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path, fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); rc = 0; } + IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n", + tmp_user, netiucv_printname(tmp_udat, 16)); read_unlock_bh(&iucv_connection_rwlock); return rc; } @@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) conn->path = path; path->msglim = NETIUCV_QUEUELEN_DEFAULT; path->flags = 0; - rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); + rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn); if (rc) { IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); return; @@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, NULL); + iucv_path_sever(conn->path, conn->userdata); fsm_newstate(fi, CONN_STATE_STARTWAIT); } @@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, NULL); - dev_info(privptr->dev, "The peer interface of the IUCV device" - " has closed the connection\n"); + iucv_path_sever(conn->path, conn->userdata); + dev_info(privptr->dev, "The peer z/VM guest %s has closed the " + "connection\n", netiucv_printuser(conn)); IUCV_DBF_TEXT(data, 2, "conn_action_connsever: Remote dropped connection\n"); fsm_newstate(fi, CONN_STATE_STARTWAIT); @@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, CONN_STATE_STARTWAIT); - IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", - netdev->name, conn->userid); /* * We must set the state before calling iucv_connect because 
the @@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CONN_STATE_SETUPWAIT); conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); + IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n", + netdev->name, netiucv_printuser(conn)); + rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, - NULL, iucvMagic, conn); + NULL, conn->userdata, conn); switch (rc) { case 0: netdev->tx_queue_len = conn->path->msglim; @@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) case 11: dev_warn(privptr->dev, "The IUCV device failed to connect to z/VM guest %s\n", - netiucv_printname(conn->userid)); + netiucv_printname(conn->userid, 8)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 12: dev_warn(privptr->dev, "The IUCV device failed to connect to the peer on z/VM" - " guest %s\n", netiucv_printname(conn->userid)); + " guest %s\n", netiucv_printname(conn->userid, 8)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 13: @@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) dev_err(privptr->dev, "z/VM guest %s has too many IUCV connections" " to connect with the IUCV device\n", - netiucv_printname(conn->userid)); + netiucv_printname(conn->userid, 8)); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 15: @@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg) netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); - iucv_path_sever(conn->path, iucvMagic); + iucv_path_sever(conn->path, conn->userdata); kfree(conn->path); conn->path = NULL; } @@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, DEV_STATE_RUNNING); dev_info(privptr->dev, "The IUCV device has been connected" - " successfully to %s\n", privptr->conn->userid); + " successfully to %s\n", + netiucv_printuser(privptr->conn)); IUCV_DBF_TEXT(setup, 3, "connection is up and running\n"); break; @@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr, struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); + return sprintf(buf, "%s\n", netiucv_printuser(priv->conn)); } -static ssize_t user_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int netiucv_check_user(const char *buf, size_t count, char *username, + char *userdata) { - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->conn->netdev; - char *p; - char *tmp; - char username[9]; - int i; - struct iucv_connection *cp; + const char *p; + int i; - IUCV_DBF_TEXT(trace, 3, __func__); - if (count > 9) { - IUCV_DBF_TEXT_(setup, 2, - "%d is length of username\n", (int) count); + p = strchr(buf, '.'); + if ((p && ((count > 26) || + ((p - buf) > 8) || + (buf + count - p > 18))) || + (!p && (count > 9))) { + IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); return -EINVAL; } - tmp = strsep((char **) &buf, "\n"); - for (i = 0, p = tmp; i < 8 && *p; i++, p++) { - if (isalnum(*p) || (*p == '$')) { - username[i]= toupper(*p); + for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) { + if (isalnum(*p) || *p == '$') { + username[i] = toupper(*p); continue; } - if (*p == '\n') { + if (*p == '\n') /* trailing lf, grr */ break; - } IUCV_DBF_TEXT_(setup, 2, - "username: invalid character %c\n", 
*p); + "conn_write: invalid character %02x\n", *p); return -EINVAL; } while (i < 8) username[i++] = ' '; username[8] = '\0'; + if (*p == '.') { + p++; + for (i = 0; i < 16 && *p; i++, p++) { + if (*p == '\n') + break; + userdata[i] = toupper(*p); + } + while (i > 0 && i < 16) + userdata[i++] = ' '; + } else + memcpy(userdata, iucvMagic_ascii, 16); + userdata[16] = '\0'; + ASCEBC(userdata, 16); + + return 0; +} + +static ssize_t user_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + char username[9]; + char userdata[17]; + int rc; + struct iucv_connection *cp; + + IUCV_DBF_TEXT(trace, 3, __func__); + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; + if (memcmp(username, priv->conn->userid, 9) && (ndev->flags & (IFF_UP | IFF_RUNNING))) { /* username changed while the interface is active. */ @@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, } read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) { read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " - "to %s already exists\n", username); + IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); memcpy(priv->conn->userid, username, 9); + memcpy(priv->conn->userdata, userdata, 17); return count; } @@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, bs1 = simple_strtoul(buf, &e, 0); if (e && (!isspace(*e))) { - IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n", + *e); return -EINVAL; } if (bs1 > NETIUCV_BUFSIZE_MAX) { @@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev) * Add it to the list of netiucv connections; */ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, - char *username) + char *username, + char *userdata) { struct iucv_connection *conn; @@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, fsm_settimer(conn->fsm, &conn->timer); fsm_newstate(conn->fsm, CONN_STATE_INVALID); + if (userdata) + memcpy(conn->userdata, userdata, 17); if (username) { memcpy(conn->userid, username, 9); fsm_newstate(conn->fsm, CONN_STATE_STOPPED); @@ -1919,6 +1986,7 @@ out: */ static void netiucv_remove_connection(struct iucv_connection *conn) { + IUCV_DBF_TEXT(trace, 3, __func__); write_lock_bh(&iucv_connection_rwlock); list_del_init(&conn->list); @@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn) fsm_deltimer(&conn->timer); netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { - iucv_path_sever(conn->path, iucvMagic); + iucv_path_sever(conn->path, conn->userdata); kfree(conn->path); conn->path = NULL; } @@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev) /** * Allocate and initialize everything of a net device. 
*/ -static struct net_device *netiucv_init_netdevice(char *username) +static struct net_device *netiucv_init_netdevice(char *username, char *userdata) { struct netiucv_priv *privptr; struct net_device *dev; @@ -2004,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username) if (!privptr->fsm) goto out_netdev; - privptr->conn = netiucv_new_connection(dev, username); + privptr->conn = netiucv_new_connection(dev, username, userdata); if (!privptr->conn) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); goto out_fsm; @@ -2022,47 +2090,31 @@ out_netdev: static ssize_t conn_write(struct device_driver *drv, const char *buf, size_t count) { - const char *p; char username[9]; - int i, rc; + char userdata[17]; + int rc; struct net_device *dev; struct netiucv_priv *priv; struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __func__); - if (count>9) { - IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); - return -EINVAL; - } - - for (i = 0, p = buf; i < 8 && *p; i++, p++) { - if (isalnum(*p) || *p == '$') { - username[i] = toupper(*p); - continue; - } - if (*p == '\n') - /* trailing lf, grr */ - break; - IUCV_DBF_TEXT_(setup, 2, - "conn_write: invalid character %c\n", *p); - return -EINVAL; - } - while (i < 8) - username[i++] = ' '; - username[8] = '\0'; + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9)) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17)) { read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " - "to %s already exists\n", username); + IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); - dev = netiucv_init_netdevice(username); + dev = netiucv_init_netdevice(username, userdata); if (!dev) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; @@ -2083,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv, if (rc) goto out_unreg; - dev_info(priv->dev, "The IUCV interface to %s has been" - " established successfully\n", netiucv_printname(username)); + dev_info(priv->dev, "The IUCV interface to %s has been established " + "successfully\n", + netiucv_printuser(priv->conn)); return count; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index fff57de7894..4fae1dc1995 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum qeth_qdio_buffer_states newbufstate); - +static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); static inline const char *qeth_get_cardname(struct qeth_card *card) { @@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, int forced_cleanup) { + if (q->card->options.cq != QETH_CQ_ENABLED) + return; + if (q->bufs[bidx]->next_pending != NULL) { struct qeth_qdio_out_buffer *head = q->bufs[bidx]; struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; @@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, } } + if (forced_cleanup && 
(atomic_read(&(q->bufs[bidx]->state)) == + QETH_QDIO_BUF_HANDLED_DELAYED)) { + /* for recovery situations */ + q->bufs[bidx]->aob = q->bufstates[bidx].aob; + qeth_init_qdio_out_buf(q, bidx); + QETH_CARD_TEXT(q->card, 2, "clprecov"); + } } @@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, notification = TX_NOTIFY_OK; } else { BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING); - atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); notification = TX_NOTIFY_DELAYED_OK; } @@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, buffer->aob = NULL; qeth_clear_output_buffer(buffer->q, buffer, - QETH_QDIO_BUF_HANDLED_DELAYED); + QETH_QDIO_BUF_HANDLED_DELAYED); + /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } @@ -1113,11 +1123,25 @@ out: static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { struct sk_buff *skb; + struct iucv_sock *iucv; + int notify_general_error = 0; + + if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) + notify_general_error = 1; + + /* release may never happen from within CQ tasklet scope */ + BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); skb = skb_dequeue(&buf->skb_list); while (skb) { QETH_CARD_TEXT(buf->q->card, 5, "skbr"); QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); + if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { + if (skb->sk) { + iucv = iucv_sk(skb->sk); + iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); + } + } atomic_dec(&skb->users); dev_kfree_skb_any(skb); skb = skb_dequeue(&buf->skb_list); @@ -1160,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) continue; - qeth_cleanup_handled_pending(q, j, free); + qeth_cleanup_handled_pending(q, j, 1); qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); @@ -1207,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) qeth_free_cq(card); cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) - kfree_skb(card->qdio.in_q->bufs[j].rx_skb); + dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); kfree(card->qdio.in_q); card->qdio.in_q = NULL; /* inbound buffer pool */ @@ -1329,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) static void qeth_start_kernel_thread(struct work_struct *work) { + struct task_struct *ts; struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); QETH_CARD_TEXT(card , 2, "strthrd"); @@ -1336,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work) if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) return; - if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) - kthread_run(card->discipline.recover, (void *) card, + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { + ts = kthread_run(card->discipline.recover, (void *)card, "qeth_recover"); + if (IS_ERR(ts)) { + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, + QETH_RECOVER_THREAD); + } + } } static int qeth_setup_card(struct qeth_card *card) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a21ae3d549d..c1296713311 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card) 
spin_unlock_bh(&card->vlanlock); } -static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; QETH_CARD_TEXT_(card, 4, "aid:%d", vid); if (!vid) - return; + return 0; if (card->info.type == QETH_CARD_TYPE_OSM) { QETH_CARD_TEXT(card, 3, "aidOSM"); - return; + return 0; } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "aidREC"); - return; + return 0; } id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); if (id) { @@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) spin_lock_bh(&card->vlanlock); list_add_tail(&id->list, &card->vid_list); spin_unlock_bh(&card->vlanlock); + } else { + return -ENOMEM; } + return 0; } -static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; @@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (card->info.type == QETH_CARD_TYPE_OSM) { QETH_CARD_TEXT(card, 3, "kidOSM"); - return; + return 0; } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "kidREC"); - return; + return 0; } spin_lock_bh(&card->vlanlock); list_for_each_entry(id, &card->vid_list, list) { @@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) kfree(tmpid); } qeth_l2_set_multicast_list(card->dev); + return 0; } static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) @@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void) static void qeth_l2_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); + qeth_set_allowed_threads(card, 0, 1); if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 4d5307ddbe5..9648e4e6833 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card, qeth_l3_free_vlan_addresses6(card, vid); } -static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct qeth_card *card = dev->ml_priv; set_bit(vid, card->active_vlans); - return; + return 0; } -static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct qeth_card *card = dev->ml_priv; unsigned long flags; @@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "kidREC"); - return; + return 0; } spin_lock_irqsave(&card->vlanlock, flags); /* unregister IP addresses of vlan device */ @@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, card->active_vlans); spin_unlock_irqrestore(&card->vlanlock, flags); qeth_l3_set_multicast_list(card->dev); + return 0; } 
static inline int qeth_l3_rebuild_skb(struct qeth_card *card, @@ -2759,7 +2760,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) rcu_read_lock(); dst = skb_dst(skb); if (dst) - n = dst_get_neighbour(dst); + n = dst_get_neighbour_noref(dst); if (n) { cast_type = n->type; rcu_read_unlock(); @@ -2855,7 +2856,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_lock(); dst = skb_dst(skb); if (dst) - n = dst_get_neighbour(dst); + n = dst_get_neighbour_noref(dst); if (ipv == 4) { /* IPv4 */ hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type); @@ -3209,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev) return 0; } -static u32 qeth_l3_fix_features(struct net_device *dev, u32 features) +static netdev_features_t qeth_l3_fix_features(struct net_device *dev, + netdev_features_t features) { struct qeth_card *card = dev->ml_priv; @@ -3223,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features) return features; } -static int qeth_l3_set_features(struct net_device *dev, u32 features) +static int qeth_l3_set_features(struct net_device *dev, + netdev_features_t features) { struct qeth_card *card = dev->ml_priv; u32 changed = dev->features ^ features; @@ -3489,14 +3492,13 @@ contin: else netif_carrier_off(card->dev); if (recover_flag == CARD_STATE_RECOVER) { + rtnl_lock(); if (recovery_mode) __qeth_l3_open(card->dev); - else { - rtnl_lock(); + else dev_open(card->dev); - rtnl_unlock(); - } qeth_l3_set_multicast_list(card->dev); + rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); @@ -3542,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, card->info.hwtrap = 1; } qeth_l3_stop_card(card, recovery_mode); + if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { + rtnl_lock(); + call_netdevice_notifiers(NETDEV_REBOOT, card->dev); + rtnl_unlock(); + } rc = ccw_device_set_offline(CARD_DDEV(card)); rc2 = ccw_device_set_offline(CARD_WDEV(card)); rc3 = ccw_device_set_offline(CARD_RDEV(card)); @@ -3596,6 +3603,7 @@ static int qeth_l3_recover(void *ptr) static void qeth_l3_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); + qeth_set_allowed_threads(card, 0, 1); if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 000294a9df8..36739da8bc1 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; - csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev); + csk->l2t = t3_l2t_get(t3dev, dst, ndev); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index ac7a9b1e3e2..5a4a3bfc60c 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk) struct net_device *ndev = cdev->ports[csk->port_id]; struct port_info *pi = netdev_priv(ndev); struct sk_buff *skb = NULL; + struct neighbour *n; unsigned int step; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, @@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, 
CTPF_HAS_ATID); cxgbi_sock_get(csk); - csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0); + n = dst_get_neighbour_noref(csk->dst); + if (!n) { + pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); + goto rel_resource; + } + csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); if (!csk->l2t) { pr_err("%s, cannot alloc l2t.\n", ndev->name); goto rel_resource; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index c10f74a566f..1d25a87aa47 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) struct net_device *ndev; struct cxgbi_device *cdev; struct rtable *rt = NULL; + struct neighbour *n; struct flowi4 fl4; struct cxgbi_sock *csk = NULL; unsigned int mtu = 0; @@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) goto err_out; } dst = &rt->dst; - ndev = dst_get_neighbour(dst)->dev; + n = dst_get_neighbour_noref(dst); + if (!n) { + err = -ENODEV; + goto rel_rt; + } + ndev = n->dev; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { pr_info("multi-cast route %pI4, port %u, dev %s.\n", @@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", - dst_get_neighbour(dst)->dev->name, ndev->name, mtu); + n->dev->name, ndev->name, mtu); } cdev = cxgbi_device_find_by_netdev(ndev, &port); diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 34c3bab90b9..973223f5de8 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -607,6 +607,29 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24, sizeof(out->antenna_gain.ghz5)); + /* Extract FEM info */ + SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, + SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT); + SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, + SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT); + SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, + SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT); + SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, + SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT); + SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, + SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT); + + SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, + SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT); + SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, + SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT); + SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, + SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT); + SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, + SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT); + SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, + SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT); + sprom_extract_r458(out, in); /* TODO - get remaining rev 8 stuff needed */ diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 16a509ae517..7cdcb63b21f 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev) static int pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) { - struct net_device *dev = fp->dev; struct page *page; int err; - page = __netdev_alloc_page(dev, gfp_flags); + page = alloc_page(gfp_flags); if (!page) return -ENOMEM; @@ -312,7 +311,7 @@ pn_rx_submit(struct 
f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) err = usb_ep_queue(fp->out_ep, req, gfp_flags); if (unlikely(err)) - netdev_free_page(dev, page); + put_page(page); return err; } @@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) } if (page) - netdev_free_page(dev, page); + put_page(page); if (req) - pn_rx_submit(fp, req, GFP_ATOMIC); + pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD); } /*-------------------------------------------------------------------------*/ @@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) netif_carrier_on(dev); for (i = 0; i < phonet_rxq_size; i++) - pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC); + pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD); } spin_unlock(&port->lock); return 0; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 7317dc2ec42..0269717436a 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } +static const char *vm_bus_name(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + return vm_dev->pdev->name; +} static struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, @@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = { .del_vqs = vm_del_vqs, .get_features = vm_get_features, .finalize_features = vm_finalize_features, + .bus_name = vm_bus_name, }; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 03d1984bd36..baabb7937ec 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -598,6 +598,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, false, false); } +static const char *vp_bus_name(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + return pci_name(vp_dev->pci_dev); +} + static struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, @@ -608,6 +615,7 @@ static struct virtio_config_ops virtio_pci_config_ops = { .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, }; static void virtio_pci_release_dev(struct device *_d) |
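One detail in the wl1271_tx_complete_packet() hunk above is easy to miss: the security sequence-number update moves from wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256 to wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff. For 8-bit LSB values the two expressions compute the same modulo-256 delta; only the owning structure and the spelling of the wrap-around change. A small stand-alone check (not part of the patch, just an illustration in plain C) confirms the equivalence:

/* Verifies that the old and the new 8-bit wrap-around expressions from the
 * wl1271_tx_complete_packet() hunk agree for every possible pair of values. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (int fw = 0; fw < 256; fw++) {
		for (int cur = 0; cur < 256; cur++) {
			uint8_t fw_lsb = (uint8_t)fw;
			uint8_t cur_lsb = (uint8_t)cur;

			int old_delta = (fw_lsb - cur_lsb + 256) % 256;
			int new_delta = (fw_lsb - cur_lsb) & 0xff;

			assert(old_delta == new_delta);
		}
	}
	printf("old and new sequence-number deltas match for all 65536 pairs\n");
	return 0;
}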
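More broadly, the wl12xx.h hunk moves per-interface state into struct wl12xx_vif, kept in the drv_priv area that mac80211 reserves inside each ieee80211_vif, and adds wl12xx_vif_to_data()/wl12xx_wlvif_to_vif() to convert between the two views. The sketch below shows that conversion pattern in isolation; the struct bodies and the local container_of definition are placeholders standing in for the real mac80211 and wl12xx declarations, not the driver code itself:

/* Stand-alone illustration of the drv_priv <-> private-state round trip used
 * by the wl12xx patch above. Field contents are placeholders. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ieee80211_vif {
	int type;		/* placeholder for the real mac80211 fields */
	char drv_priv[64];	/* driver-private per-interface area */
};

struct wl12xx_vif {
	int role_id;		/* placeholder per-vif driver state */
	int channel;
};

static struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
{
	return (struct wl12xx_vif *)vif->drv_priv;
}

static struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
{
	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
}

int main(void)
{
	struct ieee80211_vif vif = { .type = 2 };
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(&vif);

	wlvif->role_id = 1;
	wlvif->channel = 6;

	/* Recover the owning vif from the per-vif state and compare. */
	printf("round-trip ok: %d\n", wl12xx_wlvif_to_vif(wlvif) == &vif);
	return 0;
}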