static int cosa_net_open(struct net_device *d);
static int cosa_net_close(struct net_device *d);
static void cosa_net_timeout(struct net_device *d);
-static int cosa_net_tx(struct sk_buff *skb, struct net_device *d);
+static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
static char *cosa_net_setup_rx(struct channel_data *channel, int size);
static int cosa_net_rx_done(struct channel_data *channel);
static int cosa_net_tx_done(struct channel_data *channel, int size);
return 0;
}
-static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
+ struct net_device *dev)
{
struct channel_data *chan = dev_to_chan(dev);
chan->tx_skb = skb;
cosa_start_tx(chan, skb->data, skb->len);
- return 0;
+ return NETDEV_TX_OK;
}
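
This hunk shows the pattern applied throughout the patch: ndo_start_xmit implementations return the netdev_tx_t enum rather than a bare int, so magic 0/1 returns become the named NETDEV_TX_OK/NETDEV_TX_BUSY values and the intent is visible in the type. A minimal sketch of the converted shape, using a hypothetical foo driver rather than any file touched here:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical handler illustrating the converted signature. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	/* ... hand the skb to the hardware ... */

	/* Only NETDEV_TX_OK and NETDEV_TX_BUSY are meaningful to the
	 * core; a bare "return 0;" compiled silently under the old
	 * int-returning prototype. */
	return NETDEV_TX_OK;
}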
static void cosa_net_timeout(struct net_device *dev)
const void *daddr, const void *saddr,
unsigned len);
static int cycx_netdevice_rebuild_header(struct sk_buff *skb);
-static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
+static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static struct net_device_stats *
cycx_netdevice_get_stats(struct net_device *dev);
* bottom half" (with interrupts enabled).
* 2. Setting tbusy flag will inhibit further transmit requests from the
* protocol stack and can be used for flow control with protocol layer. */
-static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct cycx_x25_channel *chan = netdev_priv(dev);
struct cycx_device *card = chan->card;
dev_kfree_skb(skb);
}
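
The "tbusy flag" in the note above is the pre-netdev-queue-API name for the flow control the stack does today: stop the queue in the xmit path, wake it from the TX-completion path. A sketch of that pairing with illustrative names, not code from cycx_x25.c:

/* Illustrative flow control; the foo_* names are hypothetical. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	netif_stop_queue(dev);	/* modern equivalent of setting tbusy */
	/* ... start the transmission ... */
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	/* clearing tbusy: let the protocol stack submit frames again */
	netif_wake_queue(dev);
}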
-static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t dlci_transmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct dlci_local *dlp;
- int ret;
-
- ret = 0;
+ netdev_tx_t ret;
if (!skb || !dev)
return NETDEV_TX_OK;
netif_stop_queue(dev);
+ /* This is hackish, overloads driver specific return values
+ on top of normal transmit return! */
ret = dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave);
switch (ret)
{
dev->stats.tx_packets++;
ret = NETDEV_TX_OK;
break;
case DLCI_RET_ERR:
dev->stats.tx_errors++;
ret = NETDEV_TX_OK;
break;
case DLCI_RET_DROP:
dev->stats.tx_dropped++;
ret = NETDEV_TX_BUSY;
break;
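
As the added comment warns, ret briefly holds the slave's driver-private DLCI_RET_* codes before being overwritten with a real netdev_tx_t, which only works because the slave is known to speak this convention. A less overloaded shape would keep the two value spaces apart; a sketch only, not part of this patch, assuming dlci.c's struct dlci_local and the DLCI_RET_* codes from <linux/if_frad.h>:

/* Sketch: separate the slave's private code from the netdev_tx_t
 * reported to the stack. */
static netdev_tx_t dlci_transmit_sketch(struct sk_buff *skb,
					struct net_device *dev)
{
	struct dlci_local *dlp = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;
	int code;

	netif_stop_queue(dev);
	code = dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave);

	switch (code) {
	case DLCI_RET_OK:
		dev->stats.tx_packets++;
		break;
	case DLCI_RET_ERR:
		dev->stats.tx_errors++;
		break;
	case DLCI_RET_DROP:
		dev->stats.tx_dropped++;
		ret = NETDEV_TX_BUSY;	/* stack will requeue the skb */
		break;
	}
	return ret;
}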
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
-static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
+static netdev_tx_t dscc4_start_xmit(struct sk_buff *,
+ struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
}
#endif /* DSCC4_POLLING */
-static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
port->start = 0;
}
-static int
+static netdev_tx_t
fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fst_card_info *card;
#endif /* DEBUG_RINGS */
-static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
spin_unlock_irq(&port->lock);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
#endif /* DEBUG_RINGS */
-static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port->card;
spin_unlock_irq(&port->lock);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
return hdlc->proto->netif_rx(skb);
}
-int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
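
hdlc_start_xmit is the generic HDLC dispatcher, so its type change ripples into every protocol and port driver converted above and below. A sketch of the dispatch it performs, assuming hdlc_device's xmit hooks have been converted to netdev_tx_t in the same patch:

netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);

	if (hdlc->proto->xmit)
		return hdlc->proto->xmit(skb, dev);

	return hdlc->xmit(skb, dev);	/* call the hardware driver directly */
}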
return -EINVAL;
}
-static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
pvc_device *pvc = dev->ml_priv;
static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
-static int eth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev)
{
int pad = ETH_ZLEN - skb->len;
if (pad > 0) { /* Pad the frame with zeros */
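
eth_tx zero-pads undersized frames up to ETH_ZLEN (60 bytes) before transmission, since Ethernet requires a minimum frame size. The same effect can be had from the generic helper; a sketch, not the code in hdlc_raw_eth.c:

/* skb_padto() zero-fills the tail up to ETH_ZLEN and frees the skb
 * on allocation failure, hence the bare NETDEV_TX_OK on error. */
static netdev_tx_t foo_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed by the helper */

	/* ... transmit the now >= ETH_ZLEN-byte frame ... */
	return NETDEV_TX_OK;
}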
-static int x25_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
int result;
skb_pull(skb, 1);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
case 1:
if ((result = lapb_connect_request(dev)) != LAPB_OK) {
}
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
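
x25_xmit demultiplexes on the first byte of the frame: 0 prefixes user data handed to LAPB, while 1 and 2 request link establishment and release. A sketch of that dispatch with the error paths trimmed; the hunk above shows only fragments of the real function:

static netdev_tx_t x25_xmit_sketch(struct sk_buff *skb,
				   struct net_device *dev)
{
	switch (skb->data[0]) {
	case 0:			/* data: strip the pseudo-header byte */
		skb_pull(skb, 1);
		if (lapb_data_request(dev, skb) != LAPB_OK)
			dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	case 1:			/* establish the LAPB link */
		lapb_connect_request(dev);
		break;
	case 2:			/* release the LAPB link */
		lapb_disconnect_request(dev);
		break;
	}
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}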
* Passed network frames, fire them downwind.
*/
-static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
+static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
+ struct net_device *d)
{
return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
}
/*
* Send a LAPB frame via an ethernet interface
*/
-static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
int err;
MODULE_LICENSE("GPL v2");
-static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static int lmc_rx (struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
return IRQ_RETVAL(handled);
}
-static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
lmc_softc_t *sc = dev_to_sc(dev);
u32 flag;
int entry;
- int ret = NETDEV_TX_OK;
unsigned long flags;
lmc_trace(dev, "lmc_start_xmit in");
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_start_xmit_out");
- return ret;
+ return NETDEV_TX_OK;
}
static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
static int sbni_open( struct net_device * );
static int sbni_close( struct net_device * );
-static int sbni_start_xmit( struct sk_buff *, struct net_device * );
+static netdev_tx_t sbni_start_xmit(struct sk_buff *,
+ struct net_device * );
static int sbni_ioctl( struct net_device *, struct ifreq *, int );
static void set_multicast_list( struct net_device * );
#ifdef CONFIG_SBNI_MULTILINE
-static int
+static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
struct net_device *p;
#else /* CONFIG_SBNI_MULTILINE */
-static int
+static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
**************************/
/* NOTE: the DLCI driver deals with freeing the SKB!! */
-static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sdla_transmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct frad_local *flp;
int ret, addr, accept, i;
if(flp->master[i]!=NULL)
netif_wake_queue(flp->master[i]);
}
- return(ret);
+ return NETDEV_TX_OK;
}
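
The NOTE above is what lets this function report NETDEV_TX_OK unconditionally: the stacked DLCI driver owns and frees the skb. The general contract is that NETDEV_TX_OK means the skb was consumed (transmitted or freed), while NETDEV_TX_BUSY may only be returned with the skb untouched so the stack can requeue it. An illustrative sketch; both helper names are hypothetical:

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (foo_tx_ring_full(dev)) {		/* hypothetical check */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb NOT consumed: stack requeues */
	}

	foo_queue_to_hardware(dev, skb);	/* hypothetical helper */
	return NETDEV_TX_OK;		/* skb consumed: driver frees it later */
}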
static void sdla_receive(struct net_device *dev)
* Passed network frames, fire them downwind.
*/
-static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
+static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
+ struct net_device *d)
{
return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}
-static int wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
desc_t *desc;
/* Encapsulate an IP datagram and kick it into a TTY queue. */
-static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
int err;
* point.
*/
-int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
unsigned long flags;
netif_stop_queue(c->netdevice);
if(c->tx_next_skb)
- {
- return 1;
- }
+ return NETDEV_TX_BUSY;
+
/* PC SPECIFIC - DMA limits */
z8530_tx_begin(c);
spin_unlock_irqrestore(c->lock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
EXPORT_SYMBOL(z8530_queue_xmit);
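
z8530_queue_xmit's old return 1 worked only because NETDEV_TX_BUSY happened to be 1; the typed return makes that contract explicit, and the hostess_sv11 and sealevel wrappers above now propagate it without conversion. For reference, the return space roughly as <linux/netdevice.h> defined it at the time of this conversion:

enum netdev_tx {
	NETDEV_TX_OK = 0,	/* driver took care of the packet */
	NETDEV_TX_BUSY,		/* driver busy: the stack should retry */
	NETDEV_TX_LOCKED = -1,	/* driver tx lock already taken (LLTX) */
};
typedef enum netdev_tx netdev_tx_t;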
extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
+ struct sk_buff *skb);
extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);