<html><head><meta name="color-scheme" content="light dark"></head><body><pre style="word-wrap: break-word; white-space: pre-wrap;">This patch fixes a braindead indentation mess in speedo_start_xmit 
that someone introduced when adding the flags variable.  *sigh*.

		-ben


diff -urN v2.4.9-ac10/drivers/net/eepro100.c foo-v2.4.9-ac10/drivers/net/eepro100.c
--- v2.4.9-ac10/drivers/net/eepro100.c	Mon Sep 10 15:10:59 2001
+++ foo-v2.4.9-ac10/drivers/net/eepro100.c	Thu Sep 13 18:02:08 2001
@@ -1313,56 +1313,55 @@
 	long ioaddr = dev-&gt;base_addr;
 	int entry;
 
-	{	/* Prevent interrupts from changing the Tx ring from underneath us. */
-		unsigned long flags;
+	/* Prevent interrupts from changing the Tx ring from underneath us. */
+	unsigned long flags;
 
-		spin_lock_irqsave(&amp;sp-&gt;lock, flags);
-
-		/* Check if there are enough space. */
-		if ((int)(sp-&gt;cur_tx - sp-&gt;dirty_tx) &gt;= TX_QUEUE_LIMIT) {
-			printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev-&gt;name);
-			netif_stop_queue(dev);
-			sp-&gt;tx_full = 1;
-			spin_unlock_irqrestore(&amp;sp-&gt;lock, flags);
-			return 1;
-		}
-
-		/* Calculate the Tx descriptor entry. */
-		entry = sp-&gt;cur_tx++ % TX_RING_SIZE;
-
-		sp-&gt;tx_skbuff[entry] = skb;
-		sp-&gt;tx_ring[entry].status =
-			cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
-		if (!(entry &amp; ((TX_RING_SIZE&gt;&gt;2)-1)))
-			sp-&gt;tx_ring[entry].status |= cpu_to_le32(CmdIntr);
-		sp-&gt;tx_ring[entry].link =
-			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp-&gt;cur_tx % TX_RING_SIZE));
-		sp-&gt;tx_ring[entry].tx_desc_addr =
-			cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
-		/* The data region is always in one buffer descriptor. */
-		sp-&gt;tx_ring[entry].count = cpu_to_le32(sp-&gt;tx_threshold);
-		sp-&gt;tx_ring[entry].tx_buf_addr0 =
-			cpu_to_le32(pci_map_single(sp-&gt;pdev, skb-&gt;data,
-						   skb-&gt;len, PCI_DMA_TODEVICE));
-		sp-&gt;tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb-&gt;len);
-		/* Trigger the command unit resume. */
-		wait_for_cmd_done(ioaddr + SCBCmd);
-		clear_suspend(sp-&gt;last_cmd);
-		/* We want the time window between clearing suspend flag on the previous
-		   command and resuming CU to be as small as possible.
-		   Interrupts in between are very undesired.  --SAW */
-		outb(CUResume, ioaddr + SCBCmd);
-		sp-&gt;last_cmd = (struct descriptor *)&amp;sp-&gt;tx_ring[entry];
-
-		/* Leave room for set_rx_mode(). If there is no more space than reserved
-		   for multicast filter mark the ring as full. */
-		if ((int)(sp-&gt;cur_tx - sp-&gt;dirty_tx) &gt;= TX_QUEUE_LIMIT) {
-			netif_stop_queue(dev);
-			sp-&gt;tx_full = 1;
-		}
+	spin_lock_irqsave(&amp;sp-&gt;lock, flags);
 
+	/* Check if there are enough space. */
+	if ((int)(sp-&gt;cur_tx - sp-&gt;dirty_tx) &gt;= TX_QUEUE_LIMIT) {
+		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev-&gt;name);
+		netif_stop_queue(dev);
+		sp-&gt;tx_full = 1;
 		spin_unlock_irqrestore(&amp;sp-&gt;lock, flags);
+		return 1;
 	}
+
+	/* Calculate the Tx descriptor entry. */
+	entry = sp-&gt;cur_tx++ % TX_RING_SIZE;
+
+	sp-&gt;tx_skbuff[entry] = skb;
+	sp-&gt;tx_ring[entry].status =
+		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+	if (!(entry &amp; ((TX_RING_SIZE&gt;&gt;2)-1)))
+		sp-&gt;tx_ring[entry].status |= cpu_to_le32(CmdIntr);
+	sp-&gt;tx_ring[entry].link =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp-&gt;cur_tx % TX_RING_SIZE));
+	sp-&gt;tx_ring[entry].tx_desc_addr =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
+	/* The data region is always in one buffer descriptor. */
+	sp-&gt;tx_ring[entry].count = cpu_to_le32(sp-&gt;tx_threshold);
+	sp-&gt;tx_ring[entry].tx_buf_addr0 =
+		cpu_to_le32(pci_map_single(sp-&gt;pdev, skb-&gt;data,
+					   skb-&gt;len, PCI_DMA_TODEVICE));
+	sp-&gt;tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb-&gt;len);
+	/* Trigger the command unit resume. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	clear_suspend(sp-&gt;last_cmd);
+	/* We want the time window between clearing suspend flag on the previous
+	   command and resuming CU to be as small as possible.
+	   Interrupts in between are very undesired.  --SAW */
+	outb(CUResume, ioaddr + SCBCmd);
+	sp-&gt;last_cmd = (struct descriptor *)&amp;sp-&gt;tx_ring[entry];
+
+	/* Leave room for set_rx_mode(). If there is no more space than reserved
+	   for multicast filter mark the ring as full. */
+	if ((int)(sp-&gt;cur_tx - sp-&gt;dirty_tx) &gt;= TX_QUEUE_LIMIT) {
+		netif_stop_queue(dev);
+		sp-&gt;tx_full = 1;
+	}
+
+	spin_unlock_irqrestore(&amp;sp-&gt;lock, flags);
 
 	dev-&gt;trans_start = jiffies;
 
</pre></body></html>