-rw-r--r--  include/linux/netdevice.h  58
-rw-r--r--  net/core/dev.c             51
2 files changed, 54 insertions, 55 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f5ea445f89f..b4d056ceab9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -996,17 +996,17 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+ clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
/**
* netif_start_queue - allow transmit
* @dev: network device
*
* Allow upper layers to call the device hard_start_xmit routine.
*/
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
-{
- clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
static inline void netif_start_queue(struct net_device *dev)
{
netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
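For context, a minimal sketch of how a driver's open routine typically uses netif_start_queue (the foo_* names are illustrative and not part of this patch):

static int foo_open(struct net_device *dev)
{
        /* ... allocate rings, request the IRQ, bring the hardware up ... */

        /* allow the stack to start calling our hard_start_xmit routine */
        netif_start_queue(dev);
        return 0;
}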
@@ -1022,13 +1022,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
}
}
-/**
- * netif_wake_queue - restart transmit
- * @dev: network device
- *
- * Allow upper layers to call the device hard_start_xmit routine.
- * Used for flow control when transmit resources are available.
- */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
@@ -1041,6 +1034,13 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
__netif_schedule(dev_queue->qdisc);
}
+/**
+ * netif_wake_queue - restart transmit
+ * @dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are available.
+ */
static inline void netif_wake_queue(struct net_device *dev)
{
netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
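A sketch of the usual counterpart in a driver's TX completion path, where netif_wake_queue re-enables transmission once descriptors have been reclaimed (foo_* names are illustrative):

static void foo_tx_complete(struct net_device *dev)
{
        /* ... reclaim finished descriptors, freeing TX ring slots ... */

        /* transmit resources are available again, so restart the queue */
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
}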
@@ -1056,6 +1056,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
}
}
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+ set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
/**
* netif_stop_queue - stop transmitted packets
* @dev: network device
@@ -1063,11 +1068,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
* Stop upper layers calling the device hard_start_xmit routine.
* Used for flow control when transmit resources are unavailable.
*/
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
-{
- set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
static inline void netif_stop_queue(struct net_device *dev)
{
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
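And the matching stop side, sketched as a hard_start_xmit routine that throttles the stack when the TX ring fills up (foo_tx_ring_full() is a hypothetical helper):

static int foo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... post the skb to the hardware TX ring ... */

        /* no room for another frame: stop the stack calling us until
         * the completion path wakes the queue again */
        if (foo_tx_ring_full(dev))
                netif_stop_queue(dev);
        return NETDEV_TX_OK;
}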
@@ -1083,17 +1083,17 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
}
}
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+ return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
/**
* netif_queue_stopped - test if transmit queue is flowblocked
* @dev: network device
*
* Test if transmit queue on device is currently unable to send.
*/
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
-{
- return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
static inline int netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
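For multiqueue devices the per-queue helper can be called directly on a specific TX queue; a small sketch (the queue index and function name are illustrative):

static bool foo_subqueue_blocked(struct net_device *dev, unsigned int index)
{
        const struct netdev_queue *txq = netdev_get_tx_queue(dev, index);

        /* true while __QUEUE_STATE_XOFF is set on this TX queue */
        return netif_tx_queue_stopped(txq);
}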
@@ -1463,13 +1463,6 @@ static inline void netif_rx_complete(struct net_device *dev,
local_irq_restore(flags);
}
-/**
- * netif_tx_lock - grab network device transmit lock
- * @dev: network device
- * @cpu: cpu number of lock owner
- *
- * Get network device transmit lock
- */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
@@ -1482,6 +1475,13 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
txq->xmit_lock_owner = smp_processor_id();
}
+/**
+ * netif_tx_lock - grab network device transmit lock
+ * @dev: network device
+ * @cpu: cpu number of lock owner
+ *
+ * Get network device transmit lock
+ */
static inline void netif_tx_lock(struct net_device *dev)
{
int cpu = smp_processor_id();
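A sketch of how netif_tx_lock is typically paired with netif_tx_unlock to keep the transmit path out while a driver reprograms its hardware (foo_reset_task is illustrative):

static void foo_reset_task(struct net_device *dev)
{
        /* serialize against hard_start_xmit while TX state is rebuilt */
        netif_tx_lock(dev);
        /* ... reinitialize the transmit ring and hardware registers ... */
        netif_tx_unlock(dev);
}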
diff --git a/net/core/dev.c b/net/core/dev.c
index ad5598d2bb3..65eea83613e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1645,32 +1645,6 @@ out_kfree_skb:
return 0;
}
-/**
- * dev_queue_xmit - transmit a buffer
- * @skb: buffer to transmit
- *
- * Queue a buffer for transmission to a network device. The caller must
- * have set the device and priority and built the buffer before calling
- * this function. The function can be called from an interrupt.
- *
- * A negative errno code is returned on a failure. A success does not
- * guarantee the frame will be transmitted as it may be dropped due
- * to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- * I notice this method can also return errors from the queue disciplines,
- * including NET_XMIT_DROP, which is a positive value. So, errors can also
- * be positive.
- *
- * Regardless of the return value, the skb is consumed, so it is currently
- * difficult to retry a send to this method. (You can bump the ref count
- * before sending to hold a reference for retry if you are careful.)
- *
- * When calling this method, interrupts MUST be enabled. This is because
- * the BH enable code must have IRQs enabled so that it will not deadlock.
- * --BLG
- */
-
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;
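A sketch of a caller honouring the rules spelled out in the dev_queue_xmit comment above: the skb is consumed on success and failure alike, errors can be negative errnos or positive NET_XMIT_* codes, and interrupts must be enabled (foo_send is illustrative):

static int foo_send(struct sk_buff *skb, struct net_device *dev)
{
        int rc;

        skb->dev = dev; /* device and priority set, frame fully built */

        rc = dev_queue_xmit(skb);       /* consumes the skb either way */
        if (rc != NET_XMIT_SUCCESS) {
                /* negative errno or positive NET_XMIT_* code; the skb is
                 * gone, so a retry needs its own reference taken earlier */
                return rc;
        }
        return 0;
}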
@@ -1738,6 +1712,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
return netdev_get_tx_queue(dev, queue_index);
}
+/**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+ *
+ * Queue a buffer for transmission to a network device. The caller must
+ * have set the device and priority and built the buffer before calling
+ * this function. The function can be called from an interrupt.
+ *
+ * A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ * I notice this method can also return errors from the queue disciplines,
+ * including NET_XMIT_DROP, which is a positive value. So, errors can also
+ * be positive.
+ *
+ * Regardless of the return value, the skb is consumed, so it is currently
+ * difficult to retry a send to this method. (You can bump the ref count
+ * before sending to hold a reference for retry if you are careful.)
+ *
+ * When calling this method, interrupts MUST be enabled. This is because
+ * the BH enable code must have IRQs enabled so that it will not deadlock.
+ * --BLG
+ */
int dev_queue_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;