1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following is the change log and history:
25      
26           Fix bugs in Monalisa's UBR driver.
27           Modify the basic memory allocation and DMA logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add support for all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/wait.h>
57 #include <linux/slab.h>
58 #include <asm/system.h>  
59 #include <asm/io.h>  
60 #include <asm/atomic.h>  
61 #include <asm/uaccess.h>  
62 #include <asm/string.h>  
63 #include <asm/byteorder.h>  
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
66 #include "iphase.h"               
67 #include "suni.h"                 
68 #define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
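/* e.g. swap_byte_order(0x1234) == 0x3412; used below to byte-swap the
   16-bit length field of the AAL5 CPCS trailer. */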
69
70 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71
72 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73 static void desc_dbg(IADEV *iadev);
74
75 static IADEV *ia_dev[8];
76 static struct atm_dev *_ia_dev[8];
77 static int iadev_count;
78 static void ia_led_timer(unsigned long arg);
79 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
84
85 module_param(IA_TX_BUF, int, 0);
86 module_param(IA_TX_BUF_SZ, int, 0);
87 module_param(IA_RX_BUF, int, 0);
88 module_param(IA_RX_BUF_SZ, int, 0);
89 module_param(IADebugFlag, uint, 0644);
90
91 MODULE_LICENSE("GPL");
92
93 /**************************** IA_LIB **********************************/
94
95 static void ia_init_rtn_q (IARTN_Q *que) 
96 {
97    que->next = NULL; 
98    que->tail = NULL; 
99 }
100
101 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
102 {
103    data->next = NULL;
104    if (que->next == NULL) 
105       que->next = que->tail = data;
106    else {
107       data->next = que->next;
108       que->next = data;
109    } 
110    return;
111 }
112
113 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115    if (!entry) return -1;
116    entry->data = data;
117    entry->next = NULL;
118    if (que->next == NULL) 
119       que->next = que->tail = entry;
120    else {
121       que->tail->next = entry;
122       que->tail = que->tail->next;
123    }      
124    return 1;
125 }
126
127 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
128    IARTN_Q *tmpdata;
129    if (que->next == NULL)
130       return NULL;
131    tmpdata = que->next;
132    if ( que->next == que->tail)  
133       que->next = que->tail = NULL;
134    else 
135       que->next = que->next->next;
136    return tmpdata;
137 }
138
139 static void ia_hack_tcq(IADEV *dev) {
140
141   u_short               desc1;
142   u_short               tcq_wr;
143   struct ia_vcc         *iavcc_r = NULL; 
144
145   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
146   while (dev->host_tcq_wr != tcq_wr) {
147      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
148      if (!desc1) ;   /* nothing to do for an empty slot */
149      else if (!dev->desc_tbl[desc1 -1].timestamp) {
150         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
151         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
152      }                                 
153      else if (dev->desc_tbl[desc1 -1].timestamp) {
154         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
155            printk("IA: Fatal err in get_desc\n");
156            continue;
157         }
158         iavcc_r->vc_desc_cnt--;
159         dev->desc_tbl[desc1 -1].timestamp = 0;
160         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
161                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
162         if (iavcc_r->pcr < dev->rate_limit) {
163            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
164            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
165               printk("ia_hack_tcq: No memory available\n");
166         } 
167         dev->desc_tbl[desc1 -1].iavcc = NULL;
168         dev->desc_tbl[desc1 -1].txskb = NULL;
169      }
170      dev->host_tcq_wr += 2;
171      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
172         dev->host_tcq_wr = dev->ffL.tcq_st;
173   }
174 } /* ia_hack_tcq */
175
176 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
177   u_short               desc_num, i;
178   struct sk_buff        *skb;
179   struct ia_vcc         *iavcc_r = NULL; 
180   unsigned long delta;
181   static unsigned long timer = 0;
182   int ltimeout;
183
184   ia_hack_tcq (dev);
185   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
186      timer = jiffies; 
187      i=0;
188      while (i < dev->num_tx_desc) {
189         if (!dev->desc_tbl[i].timestamp) {
190            i++;
191            continue;
192         }
193         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
194         delta = jiffies - dev->desc_tbl[i].timestamp;
195         if (delta >= ltimeout) {
196            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
197            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
198               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
199            else 
200               dev->ffL.tcq_rd -= 2;
201            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
202            if (!(skb = dev->desc_tbl[i].txskb) || 
203                           !(iavcc_r = dev->desc_tbl[i].iavcc))
204               printk("Fatal err, desc table vcc or skb is NULL\n");
205            else 
206               iavcc_r->vc_desc_cnt--;
207            dev->desc_tbl[i].timestamp = 0;
208            dev->desc_tbl[i].iavcc = NULL;
209            dev->desc_tbl[i].txskb = NULL;
210         }
211         i++;
212      } /* while */
213   }
214   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
215      return 0xFFFF;
216     
217   /* Get the next available descriptor number from TCQ */
218   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
219
220   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
221      dev->ffL.tcq_rd += 2;
222      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
223      dev->ffL.tcq_rd = dev->ffL.tcq_st;
224      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
225         return 0xFFFF; 
226      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
227   }
228
229   /* get system time */
230   dev->desc_tbl[desc_num -1].timestamp = jiffies;
231   return desc_num;
232 }
233
234 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
235   u_char                foundLockUp;
236   vcstatus_t            *vcstatus;
237   u_short               *shd_tbl;
238   u_short               tempCellSlot, tempFract;
239   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
240   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
241   u_int  i;
242
243   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
244      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
245      vcstatus->cnt++;
246      foundLockUp = 0;
247      if( vcstatus->cnt == 0x05 ) {
248         abr_vc += vcc->vci;
249         eabr_vc += vcc->vci;
250         if( eabr_vc->last_desc ) {
251            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
252               /* Wait for 10 Micro sec */
253               udelay(10);
254               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
255                  foundLockUp = 1;
256            }
257            else {
258               tempCellSlot = abr_vc->last_cell_slot;
259               tempFract    = abr_vc->fraction;
260               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
261                          && (tempFract == dev->testTable[vcc->vci]->fract))
262                  foundLockUp = 1;                   
263               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
264               dev->testTable[vcc->vci]->fract = tempFract; 
265            }        
266         } /* last descriptor */            
267         vcstatus->cnt = 0;      
268      } /* vcstatus->cnt */
269         
270      if (foundLockUp) {
271         IF_ABR(printk("LOCK UP found\n");) 
272         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
273         /* Wait for 10 Micro sec */
274         udelay(10); 
275         abr_vc->status &= 0xFFF8;
276         abr_vc->status |= 0x0001;  /* state is idle */
277         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
278         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
279         if (i < dev->num_vc)
280            shd_tbl[i] = vcc->vci;
281         else
282            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
283         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
284         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
285         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
286         vcstatus->cnt = 0;
287      } /* foundLockUp */
288
289   } /* if an ABR VC */
290
291
292 }
293  
294 /*
295 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
296 **
297 **  +----+----+------------------+-------------------------------+
298 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
299 **  +----+----+------------------+-------------------------------+
300 ** 
301 **    R = reserved (written as 0)
302 **    NZ = 0 if 0 cells/sec; 1 otherwise
303 **
304 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
305 */
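/*
** Worked example (illustrative): for the OC-3 rate of 353207 cells/sec
** (ATM_OC3_PCR) the highest set bit is bit 18, so the exponent field is
** 18 (0x12) and the mantissa is the next 9 bits, (353207 >> 9) & 0x1ff = 0xb1.
** The encoded value is NZ | (18 << 9) | 0xb1 = 0x64b1, which decodes back to
** (1 + 0xb1/512) * 2^18 = 352768 cells/sec (the conversion truncates).
*/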
306 static u16
307 cellrate_to_float(u32 cr)
308 {
309
310 #define NZ              0x4000
311 #define M_BITS          9               /* Number of bits in mantissa */
312 #define E_BITS          5               /* Number of bits in exponent */
313 #define M_MASK          0x1ff           
314 #define E_MASK          0x1f
315   u16   flot;
316   u32   tmp = cr & 0x00ffffff;
317   int   i   = 0;
318   if (cr == 0)
319      return 0;
320   while (tmp != 1) {
321      tmp >>= 1;
322      i++;
323   }
324   if (i == M_BITS)
325      flot = NZ | (i << M_BITS) | (cr & M_MASK);
326   else if (i < M_BITS)
327      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
328   else
329      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
330   return flot;
331 }
332
333 #if 0
334 /*
335 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
336 */
337 static u32
338 float_to_cellrate(u16 rate)
339 {
340   u32   exp, mantissa, cps;
341   if ((rate & NZ) == 0)
342      return 0;
343   exp = (rate >> M_BITS) & E_MASK;
344   mantissa = rate & M_MASK;
345   if (exp == 0)
346      return 1;
347   cps = (1 << M_BITS) | mantissa;
348   if (exp == M_BITS)
349      cps = cps;
350   else if (exp > M_BITS)
351      cps <<= (exp - M_BITS);
352   else
353      cps >>= (M_BITS - exp);
354   return cps;
355 }
356 #endif 
357
358 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
359   srv_p->class_type = ATM_ABR;
360   srv_p->pcr        = dev->LineRate;
361   srv_p->mcr        = 0;
362   srv_p->icr        = 0x055cb7;
363   srv_p->tbe        = 0xffffff;
364   srv_p->frtt       = 0x3a;
365   srv_p->rif        = 0xf;
366   srv_p->rdf        = 0xb;
367   srv_p->nrm        = 0x4;
368   srv_p->trm        = 0x7;
369   srv_p->cdf        = 0x3;
370   srv_p->adtf       = 50;
371 }
372
373 static int
374 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
375                                                 struct atm_vcc *vcc, u8 flag)
376 {
377   f_vc_abr_entry  *f_abr_vc;
378   r_vc_abr_entry  *r_abr_vc;
379   u32           icr;
380   u8            trm, nrm, crm;
381   u16           adtf, air, *ptr16;      
382   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
383   f_abr_vc += vcc->vci;       
384   switch (flag) {
385      case 1: /* FFRED initialization */
386 #if 0  /* sanity check */
387        if (srv_p->pcr == 0)
388           return INVALID_PCR;
389        if (srv_p->pcr > dev->LineRate)
390           srv_p->pcr = dev->LineRate;
391        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
392           return MCR_UNAVAILABLE;
393        if (srv_p->mcr > srv_p->pcr)
394           return INVALID_MCR;
395        if (!(srv_p->icr))
396           srv_p->icr = srv_p->pcr;
397        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
398           return INVALID_ICR;
399        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
400           return INVALID_TBE;
401        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
402           return INVALID_FRTT;
403        if (srv_p->nrm > MAX_NRM)
404           return INVALID_NRM;
405        if (srv_p->trm > MAX_TRM)
406           return INVALID_TRM;
407        if (srv_p->adtf > MAX_ADTF)
408           return INVALID_ADTF;
409        else if (srv_p->adtf == 0)
410           srv_p->adtf = 1;
411        if (srv_p->cdf > MAX_CDF)
412           return INVALID_CDF;
413        if (srv_p->rif > MAX_RIF)
414           return INVALID_RIF;
415        if (srv_p->rdf > MAX_RDF)
416           return INVALID_RDF;
417 #endif
418        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
419        f_abr_vc->f_vc_type = ABR;
420        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
421                                   /* i.e 2**n = 2 << (n-1) */
422        f_abr_vc->f_nrm = nrm << 8 | nrm;
423        trm = 100000/(2 << (16 - srv_p->trm));
424        if ( trm == 0) trm = 1;
425        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
426        crm = srv_p->tbe / nrm;
427        if (crm == 0) crm = 1;
428        f_abr_vc->f_crm = crm & 0xff;
429        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
430        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
431                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
432                                 (1000000/(srv_p->frtt/srv_p->tbe)));
433        f_abr_vc->f_icr = cellrate_to_float(icr);
434        adtf = (10000 * srv_p->adtf)/8192;
435        if (adtf == 0) adtf = 1; 
436        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
437        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
438        f_abr_vc->f_acr = f_abr_vc->f_icr;
439        f_abr_vc->f_status = 0x0042;
440        break;
441     case 0: /* RFRED initialization */  
442        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
443        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
444        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
445        r_abr_vc += vcc->vci;
446        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
447        air = srv_p->pcr << (15 - srv_p->rif);
448        if (air == 0) air = 1;
449        r_abr_vc->r_air = cellrate_to_float(air);
450        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
451        dev->sum_mcr        += srv_p->mcr;
452        dev->n_abr++;
453        break;
454     default:
455        break;
456   }
457   return        0;
458 }
459 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
460    u32 rateLow=0, rateHigh, rate;
461    int entries;
462    struct ia_vcc *ia_vcc;
463
464    int   idealSlot =0, testSlot, toBeAssigned, inc;
465    u32   spacing;
466    u16  *SchedTbl, *TstSchedTbl;
467    u16  cbrVC, vcIndex;
468    u32   fracSlot    = 0;
469    u32   sp_mod      = 0;
470    u32   sp_mod2     = 0;
471
472    /* IpAdjustTrafficParams */
473    if (vcc->qos.txtp.max_pcr <= 0) {
474       IF_ERR(printk("PCR for CBR not defined\n");)
475       return -1;
476    }
477    rate = vcc->qos.txtp.max_pcr;
478    entries = rate / dev->Granularity;
479    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
480                                 entries, rate, dev->Granularity);)
481    if (entries < 1)
482       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
483    rateLow  =  entries * dev->Granularity;
484    rateHigh = (entries + 1) * dev->Granularity;
485    if (3*(rate - rateLow) > (rateHigh - rate))
486       entries++;
487    if (entries > dev->CbrRemEntries) {
488       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
489       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
490                                        entries, dev->CbrRemEntries);)
491       return -EBUSY;
492    }   
493
494    ia_vcc = INPH_IA_VCC(vcc);
495    ia_vcc->NumCbrEntry = entries; 
496    dev->sum_mcr += entries * dev->Granularity; 
497    /* IaFFrednInsertCbrSched */
498    // Starting at an arbitrary location, place the entries into the table
499    // as smoothly as possible
500    cbrVC   = 0;
501    spacing = dev->CbrTotEntries / entries;
502    sp_mod  = dev->CbrTotEntries % entries; // get modulo
503    toBeAssigned = entries;
504    fracSlot = 0;
505    vcIndex  = vcc->vci;
506    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
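   // Worked example (illustrative numbers): if CbrTotEntries were 4096 and
   // entries were 6, then spacing = 682 and sp_mod = 4. The loop below would
   // advance idealSlot by 682, 682, 683, 683 and 682 slots; the accumulated
   // fractional remainder (fracSlot/sp_mod2) adds the occasional extra slot
   // so the entries stay spread almost evenly across the table.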
507    while (toBeAssigned)
508    {
509       // If this is the first time, start the table loading for this connection
510       // as close to entryPoint as possible.
511       if (toBeAssigned == entries)
512       {
513          idealSlot = dev->CbrEntryPt;
514          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
515          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
516             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
517       } else {
518          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
519          // in the table that would be  smoothest
520          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
521          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
522       }
523       if (idealSlot >= (int)dev->CbrTotEntries) 
524          idealSlot -= dev->CbrTotEntries;  
525       // Continuously check around this ideal value until a null
526       // location is encountered.
527       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
528       inc = 0;
529       testSlot = idealSlot;
530       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
531       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
532                                 testSlot, TstSchedTbl,toBeAssigned);)
533       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
534       while (cbrVC)  // If another VC at this location, we have to keep looking
535       {
536           inc++;
537           testSlot = idealSlot - inc;
538           if (testSlot < 0) { // Wrap if necessary
539              testSlot += dev->CbrTotEntries;
540              IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
541                                                        SchedTbl,testSlot);)
542           }
543           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
544           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
545           if (!cbrVC)
546              break;
547           testSlot = idealSlot + inc;
548           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
549              testSlot -= dev->CbrTotEntries;
550              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
551              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
552                                             testSlot, toBeAssigned);)
553           } 
554           // set table index and read in value
555           TstSchedTbl = (u16*)(SchedTbl + testSlot);
556           IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
557                           TstSchedTbl,cbrVC,inc);)
558           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
559        } /* while */
560        // Move this VCI number into this location of the CBR Sched table.
561        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
562        dev->CbrRemEntries--;
563        toBeAssigned--;
564    } /* while */ 
565
566    /* IaFFrednCbrEnable */
567    dev->NumEnabledCBR++;
568    if (dev->NumEnabledCBR == 1) {
569        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
570        IF_CBR(printk("CBR is enabled\n");)
571    }
572    return 0;
573 }
574 static void ia_cbrVc_close (struct atm_vcc *vcc) {
575    IADEV *iadev;
576    u16 *SchedTbl, NullVci = 0;
577    u32 i, NumFound;
578
579    iadev = INPH_IA_DEV(vcc->dev);
580    iadev->NumEnabledCBR--;
581    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
582    if (iadev->NumEnabledCBR == 0) {
583       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
584       IF_CBR (printk("CBR support disabled\n");)
585    }
586    NumFound = 0;
587    for (i=0; i < iadev->CbrTotEntries; i++)
588    {
589       if (*SchedTbl == vcc->vci) {
590          iadev->CbrRemEntries++;
591          *SchedTbl = NullVci;
592          IF_CBR(NumFound++;)
593       }
594       SchedTbl++;   
595    } 
596    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
597 }
598
599 static int ia_avail_descs(IADEV *iadev) {
600    int tmp = 0;
601    ia_hack_tcq(iadev);
602    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
603       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
604    else
605       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
606                    iadev->ffL.tcq_st) / 2;
607    return tmp;
608 }    
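/* Illustrative example (hypothetical pointer values): with tcq_st = 0x0000,
   tcq_ed = 0x00fe, tcq_rd = 0x00f0 and host_tcq_wr = 0x0010 the write pointer
   has wrapped, so the count is (0xfe - 0xf0 + 2 + 0x10 - 0x0)/2 = 16 available
   descriptors: 8 two-byte TCQ slots from tcq_rd to the end of the queue plus
   8 from the start of the queue up to host_tcq_wr. */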
609
610 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
611
612 static int ia_que_tx (IADEV *iadev) { 
613    struct sk_buff *skb;
614    int num_desc;
615    struct atm_vcc *vcc;
616    struct ia_vcc *iavcc;
617    num_desc = ia_avail_descs(iadev);
618
619    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
620       if (!(vcc = ATM_SKB(skb)->vcc)) {
621          dev_kfree_skb_any(skb);
622          printk("ia_que_tx: Null vcc\n");
623          break;
624       }
625       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
626          dev_kfree_skb_any(skb);
627          printk("Free the SKB on closed vci %d \n", vcc->vci);
628          break;
629       }
630       iavcc = INPH_IA_VCC(vcc);
631       if (ia_pkt_tx (vcc, skb)) {
632          skb_queue_head(&iadev->tx_backlog, skb);
633       }
634       num_desc--;
635    }
636    return 0;
637 }
638
639 static void ia_tx_poll (IADEV *iadev) {
640    struct atm_vcc *vcc = NULL;
641    struct sk_buff *skb = NULL, *skb1 = NULL;
642    struct ia_vcc *iavcc;
643    IARTN_Q *  rtne;
644
645    ia_hack_tcq(iadev);
646    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
647        skb = rtne->data.txskb;
648        if (!skb) {
649            printk("ia_tx_poll: skb is null\n");
650            goto out;
651        }
652        vcc = ATM_SKB(skb)->vcc;
653        if (!vcc) {
654            printk("ia_tx_poll: vcc is null\n");
655            dev_kfree_skb_any(skb);
656            goto out;
657        }
658
659        iavcc = INPH_IA_VCC(vcc);
660        if (!iavcc) {
661            printk("ia_tx_poll: iavcc is null\n");
662            dev_kfree_skb_any(skb);
663            goto out;
664        }
665
666        skb1 = skb_dequeue(&iavcc->txing_skb);
667        while (skb1 && (skb1 != skb)) {
668           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
669              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
670           }
671           IF_ERR(printk("Releasing an skb that does not match\n");)
672           if ((vcc->pop) && (skb1->len != 0))
673           {
674              vcc->pop(vcc, skb1);
675              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
676                                                           (long)skb1);)
677           }
678           else 
679              dev_kfree_skb_any(skb1);
680           skb1 = skb_dequeue(&iavcc->txing_skb);
681        }                                                        
682        if (!skb1) {
683           IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
684           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
685           break;
686        }
687        if ((vcc->pop) && (skb->len != 0))
688        {
689           vcc->pop(vcc, skb);
690           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
691        }
692        else 
693           dev_kfree_skb_any(skb);
694        kfree(rtne);
695     }
696     ia_que_tx(iadev);
697 out:
698     return;
699 }
700 #if 0
701 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
702 {
703         u32     t;
704         int     i;
705         /*
706          * Issue a command to enable writes to the NOVRAM
707          */
708         NVRAM_CMD (EXTEND + EWEN);
709         NVRAM_CLR_CE;
710         /*
711          * issue the write command
712          */
713         NVRAM_CMD(IAWRITE + addr);
714         /* 
715          * Send the data, starting with D15, then D14, and so on for 16 bits
716          */
717         for (i=15; i>=0; i--) {
718                 NVRAM_CLKOUT (val & 0x8000);
719                 val <<= 1;
720         }
721         NVRAM_CLR_CE;
722         CFG_OR(NVCE);
723         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
724         while (!(t & NVDO))
725                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
726
727         NVRAM_CLR_CE;
728         /*
729          * disable writes again
730          */
731         NVRAM_CMD(EXTEND + EWDS);
732         NVRAM_CLR_CE;
733         CFG_AND(~NVDI);
734 }
735 #endif
736
737 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
738 {
739         u_short val;
740         u32     t;
741         int     i;
742         /*
743          * Read the first bit that was clocked with the falling edge of the
744          * last command data clock.
745          */
746         NVRAM_CMD(IAREAD + addr);
747         /*
748          * Now read the rest of the bits, the next bit read is D14, then D13,
749          * and so on.
750          */
751         val = 0;
752         for (i=15; i>=0; i--) {
753                 NVRAM_CLKIN(t);
754                 val |= (t << i);
755         }
756         NVRAM_CLR_CE;
757         CFG_AND(~NVDI);
758         return val;
759 }
760
761 static void ia_hw_type(IADEV *iadev) {
762    u_short memType = ia_eeprom_get(iadev, 25);   
763    iadev->memType = memType;
764    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
765       iadev->num_tx_desc = IA_TX_BUF;
766       iadev->tx_buf_sz = IA_TX_BUF_SZ;
767       iadev->num_rx_desc = IA_RX_BUF;
768       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
769    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
770       if (IA_TX_BUF == DFL_TX_BUFFERS)
771         iadev->num_tx_desc = IA_TX_BUF / 2;
772       else 
773         iadev->num_tx_desc = IA_TX_BUF;
774       iadev->tx_buf_sz = IA_TX_BUF_SZ;
775       if (IA_RX_BUF == DFL_RX_BUFFERS)
776         iadev->num_rx_desc = IA_RX_BUF / 2;
777       else
778         iadev->num_rx_desc = IA_RX_BUF;
779       iadev->rx_buf_sz = IA_RX_BUF_SZ;
780    }
781    else {
782       if (IA_TX_BUF == DFL_TX_BUFFERS) 
783         iadev->num_tx_desc = IA_TX_BUF / 8;
784       else
785         iadev->num_tx_desc = IA_TX_BUF;
786       iadev->tx_buf_sz = IA_TX_BUF_SZ;
787       if (IA_RX_BUF == DFL_RX_BUFFERS)
788         iadev->num_rx_desc = IA_RX_BUF / 8;
789       else
790         iadev->num_rx_desc = IA_RX_BUF;
791       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
792    } 
793    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
794    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
795          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
796          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
797
798 #if 0
799    if ((memType & FE_MASK) == FE_SINGLE_MODE)
800       iadev->phy_type = PHY_OC3C_S;
801    else if ((memType & FE_MASK) == FE_UTP_OPTION)
802       iadev->phy_type = PHY_UTP155;
803    else
804      iadev->phy_type = PHY_OC3C_M;
805 #endif
806    
807    iadev->phy_type = memType & FE_MASK;
808    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
809                                          memType,iadev->phy_type);)
810    if (iadev->phy_type == FE_25MBIT_PHY) 
811       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
812    else if (iadev->phy_type == FE_DS3_PHY)
813       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
814    else if (iadev->phy_type == FE_E3_PHY) 
815       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
816    else
817        iadev->LineRate = (u32)(ATM_OC3_PCR);
818    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
819
820 }
821
822 static void IaFrontEndIntr(IADEV *iadev) {
823   volatile IA_SUNI *suni;
824   volatile ia_mb25_t *mb25;
825   volatile suni_pm7345_t *suni_pm7345;
826   u32 intr_status;
827   u_int frmr_intr;
828
829   if(iadev->phy_type & FE_25MBIT_PHY) {
830      mb25 = (ia_mb25_t*)iadev->phy;
831      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
832   } else if (iadev->phy_type & FE_DS3_PHY) {
833      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
834      /* clear FRMR interrupts */
835      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
836      iadev->carrier_detect =  
837            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
838   } else if (iadev->phy_type & FE_E3_PHY ) {
839      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
840      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
841      iadev->carrier_detect =
842            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
843   }
844   else { 
845      suni = (IA_SUNI *)iadev->phy;
846      intr_status = suni->suni_rsop_status & 0xff;
847      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
848   }
849   if (iadev->carrier_detect)
850     printk("IA: SUNI carrier detected\n");
851   else
852     printk("IA: SUNI carrier lost signal\n"); 
853   return;
854 }
855
856 static void ia_mb25_init (IADEV *iadev)
857 {
858    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
859 #if 0
860    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
861 #endif
862    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
863    mb25->mb25_diag_control = 0;
864    /*
865     * Initialize carrier detect state
866     */
867    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
868    return;
869 }                   
870
871 static void ia_suni_pm7345_init (IADEV *iadev)
872 {
873    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
874    if (iadev->phy_type & FE_DS3_PHY)
875    {
876       iadev->carrier_detect = 
877           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
878       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
879       suni_pm7345->suni_ds3_frm_cfg = 1;
880       suni_pm7345->suni_ds3_tran_cfg = 1;
881       suni_pm7345->suni_config = 0;
882       suni_pm7345->suni_splr_cfg = 0;
883       suni_pm7345->suni_splt_cfg = 0;
884    }
885    else 
886    {
887       iadev->carrier_detect = 
888           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
889       suni_pm7345->suni_e3_frm_fram_options = 0x4;
890       suni_pm7345->suni_e3_frm_maint_options = 0x20;
891       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
892       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
893       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
894       suni_pm7345->suni_e3_tran_fram_options = 0x1;
895       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
896       suni_pm7345->suni_splr_cfg = 0x41;
897       suni_pm7345->suni_splt_cfg = 0x41;
898    } 
899    /*
900     * Enable RSOP loss of signal interrupt.
901     */
902    suni_pm7345->suni_intr_enbl = 0x28;
903  
904    /*
905     * Clear error counters
906     */
907    suni_pm7345->suni_id_reset = 0;
908
909    /*
910     * Clear "PMCTST" in master test register.
911     */
912    suni_pm7345->suni_master_test = 0;
913
914    suni_pm7345->suni_rxcp_ctrl = 0x2c;
915    suni_pm7345->suni_rxcp_fctrl = 0x81;
916  
917    suni_pm7345->suni_rxcp_idle_pat_h1 =
918         suni_pm7345->suni_rxcp_idle_pat_h2 =
919         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
920    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
921  
922    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
923    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
924    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
925    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
926  
927    suni_pm7345->suni_rxcp_cell_pat_h1 =
928         suni_pm7345->suni_rxcp_cell_pat_h2 =
929         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
930    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
931  
932    suni_pm7345->suni_rxcp_cell_mask_h1 =
933         suni_pm7345->suni_rxcp_cell_mask_h2 =
934         suni_pm7345->suni_rxcp_cell_mask_h3 =
935         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
936  
937    suni_pm7345->suni_txcp_ctrl = 0xa4;
938    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
939    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
940  
941    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
942                                  SUNI_PM7345_CLB |
943                                  SUNI_PM7345_DLB |
944                                   SUNI_PM7345_PLB);
945 #ifdef __SNMP__
946    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
947 #endif /* __SNMP__ */
948    return;
949 }
950
951
952 /***************************** IA_LIB END *****************************/
953     
954 #ifdef CONFIG_ATM_IA_DEBUG
955 static int tcnter = 0;
956 static void xdump( u_char*  cp, int  length, char*  prefix )
957 {
958     int col, count;
959     u_char prntBuf[120];
960     u_char*  pBuf = prntBuf;
961     count = 0;
962     while(count < length){
963         pBuf += sprintf( pBuf, "%s", prefix );
964         for(col = 0;count + col < length && col < 16; col++){
965             if (col != 0 && (col % 4) == 0)
966                 pBuf += sprintf( pBuf, " " );
967             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
968         }
969         while(col++ < 16){      /* pad end of buffer with blanks */
970             if ((col % 4) == 0)
971                 pBuf += sprintf( pBuf, " " );
972             pBuf += sprintf( pBuf, "   " );
973         }
974         pBuf += sprintf( pBuf, "  " );
975         for(col = 0;count + col < length && col < 16; col++){
976             if (isprint((int)cp[count + col]))
977                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
978             else
979                 pBuf += sprintf( pBuf, "." );
980                 }
981         printk("%s\n", prntBuf);
982         count += col;
983         pBuf = prntBuf;
984     }
985
986 }  /* close xdump(... */
987 #endif /* CONFIG_ATM_IA_DEBUG */
988
989   
990 static struct atm_dev *ia_boards = NULL;  
991   
992 #define ACTUAL_RAM_BASE \
993         RAM_BASE*((iadev->mem)/(128 * 1024))  
994 #define ACTUAL_SEG_RAM_BASE \
995         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
996 #define ACTUAL_REASS_RAM_BASE \
997         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
998   
999   
1000 /*-- some utilities and memory allocation stuff will come here -------------*/  
1001   
1002 static void desc_dbg(IADEV *iadev) {
1003
1004   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1005   u32 i;
1006   void __iomem *tmp;
1007   // regval = readl((u32)ia_cmds->maddr);
1008   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1009   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1010                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1011                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1012   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1013                    iadev->ffL.tcq_rd);
1014   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1015   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1016   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1017   i = 0;
1018   while (tcq_st_ptr != tcq_ed_ptr) {
1019       tmp = iadev->seg_ram+tcq_st_ptr;
1020       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1021       tcq_st_ptr += 2;
1022   }
1023   for(i=0; i <iadev->num_tx_desc; i++)
1024       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1025 } /* desc_dbg */
1026   
1027   
1028 /*----------------------------- Receiving side stuff --------------------------*/  
1029  
1030 static void rx_excp_rcvd(struct atm_dev *dev)  
1031 {  
1032 #if 0 /* closing the receiving side will cause too many excp int */  
1033   IADEV *iadev;  
1034   u_short state;  
1035   u_short excpq_rd_ptr;  
1036   //u_short *ptr;  
1037   int vci, error = 1;  
1038   iadev = INPH_IA_DEV(dev);  
1039   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1040   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1041   { printk("state = %x \n", state); 
1042         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1043  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1044         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1045             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1046         // TODO: update exception stat
1047         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1048         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1049         // pwang_test
1050         excpq_rd_ptr += 4;  
1051         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1052             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1053         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1054         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1055   }  
1056 #endif
1057 }  
1058   
1059 static void free_desc(struct atm_dev *dev, int desc)  
1060 {  
1061         IADEV *iadev;  
1062         iadev = INPH_IA_DEV(dev);  
1063         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1064         iadev->rfL.fdq_wr +=2;
1065         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1066                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1067         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1068 }  
1069   
1070   
1071 static int rx_pkt(struct atm_dev *dev)  
1072 {  
1073         IADEV *iadev;  
1074         struct atm_vcc *vcc;  
1075         unsigned short status;  
1076         struct rx_buf_desc __iomem *buf_desc_ptr;  
1077         int desc;   
1078         struct dle* wr_ptr;  
1079         int len;  
1080         struct sk_buff *skb;  
1081         u_int buf_addr, dma_addr;  
1082
1083         iadev = INPH_IA_DEV(dev);  
1084         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1085         {  
1086             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1087             return -EINVAL;  
1088         }  
1089         /* mask off the top 3 bits to get the actual desc no. */  
1090         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1091         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1092                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1093               printk(" pcq_wr_ptr = 0x%x\n",
1094                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1095         /* update the read pointer - maybe we should do this at the end */  
1096         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1097                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1098         else  
1099                 iadev->rfL.pcq_rd += 2;
1100         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1101   
1102         /* get the buffer desc entry.  
1103                 update stuff. - doesn't seem to be any update necessary  
1104         */  
1105         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1106         /* make the ptr point to the corresponding buffer desc entry */  
1107         buf_desc_ptr += desc;     
1108         if (!desc || (desc > iadev->num_rx_desc) || 
1109                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1110             free_desc(dev, desc);
1111             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1112             return -1;
1113         }
1114         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1115         if (!vcc)  
1116         {      
1117                 free_desc(dev, desc); 
1118                 printk("IA: null vcc, drop PDU\n");  
1119                 return -1;  
1120         }  
1121           
1122   
1123         /* might want to check the status bits for errors */  
1124         status = (u_short) (buf_desc_ptr->desc_mode);  
1125         if (status & (RX_CER | RX_PTE | RX_OFL))  
1126         {  
1127                 atomic_inc(&vcc->stats->rx_err);
1128                 IF_ERR(printk("IA: bad packet, dropping it");)  
1129                 if (status & RX_CER) { 
1130                     IF_ERR(printk(" cause: packet CRC error\n");)
1131                 }
1132                 else if (status & RX_PTE) {
1133                     IF_ERR(printk(" cause: packet time out\n");)
1134                 }
1135                 else {
1136                     IF_ERR(printk(" cause: buffer overflow\n");)
1137                 }
1138                 goto out_free_desc;
1139         }  
1140   
1141         /*  
1142                 build DLE.        
1143         */  
1144   
1145         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1146         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1147         len = dma_addr - buf_addr;  
1148         if (len > iadev->rx_buf_sz) {
1149            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1150            atomic_inc(&vcc->stats->rx_err);
1151            goto out_free_desc;
1152         }
1153                   
1154         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1155            if (vcc->vci < 32)
1156               printk("Drop control packets\n");
1157            goto out_free_desc;
1158         }
1159         skb_put(skb,len);  
1160         // pwang_test
1161         ATM_SKB(skb)->vcc = vcc;
1162         ATM_DESC(skb) = desc;        
1163         skb_queue_tail(&iadev->rx_dma_q, skb);  
1164
1165         /* Build the DLE structure */  
1166         wr_ptr = iadev->rx_dle_q.write;  
1167         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1168                 len, PCI_DMA_FROMDEVICE);
1169         wr_ptr->local_pkt_addr = buf_addr;  
1170         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1171         wr_ptr->mode = DMA_INT_ENABLE;  
1172   
1173         /* should take care of wrap-around here too. */  
1174         if(++wr_ptr == iadev->rx_dle_q.end)
1175              wr_ptr = iadev->rx_dle_q.start;
1176         iadev->rx_dle_q.write = wr_ptr;  
1177         udelay(1);  
1178         /* Increment transaction counter */  
1179         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1180 out:    return 0;  
1181 out_free_desc:
1182         free_desc(dev, desc);
1183         goto out;
1184 }  
1185   
1186 static void rx_intr(struct atm_dev *dev)  
1187 {  
1188   IADEV *iadev;  
1189   u_short status;  
1190   u_short state, i;  
1191   
1192   iadev = INPH_IA_DEV(dev);  
1193   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1194   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1195   if (status & RX_PKT_RCVD)  
1196   {  
1197         /* do something */  
1198         /* Basically received an interrupt for receiving a packet.  
1199         A descriptor would have been written to the packet complete   
1200         queue. Get all the descriptors and set up DMA to move the   
1201         packets till the packet complete queue is empty.  
1202         */  
1203         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1204         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1205         while(!(state & PCQ_EMPTY))  
1206         {  
1207              rx_pkt(dev);  
1208              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1209         }  
1210         iadev->rxing = 1;
1211   }  
1212   if (status & RX_FREEQ_EMPT)  
1213   {   
1214      if (iadev->rxing) {
1215         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1216         iadev->rx_tmp_jif = jiffies; 
1217         iadev->rxing = 0;
1218      } 
1219      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1220                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1221         for (i = 1; i <= iadev->num_rx_desc; i++)
1222                free_desc(dev, i);
1223 printk("Test logic RUN!!!!\n");
1224         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1225         iadev->rxing = 1;
1226      }
1227      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1228   }  
1229
1230   if (status & RX_EXCP_RCVD)  
1231   {  
1232         /* probably need to handle the exception queue also. */  
1233         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1234         rx_excp_rcvd(dev);  
1235   }  
1236
1237
1238   if (status & RX_RAW_RCVD)  
1239   {  
1240         /* need to handle the raw incoming cells. This depends on   
1241         whether we have programmed to receive the raw cells or not.  
1242         Else ignore. */  
1243         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1244   }  
1245 }  
1246   
1247   
1248 static void rx_dle_intr(struct atm_dev *dev)  
1249 {  
1250   IADEV *iadev;  
1251   struct atm_vcc *vcc;   
1252   struct sk_buff *skb;  
1253   int desc;  
1254   u_short state;   
1255   struct dle *dle, *cur_dle;  
1256   u_int dle_lp;  
1257   int len;
1258   iadev = INPH_IA_DEV(dev);  
1259  
1260   /* free all the dles done, that is just update our own dle read pointer   
1261         - do we really need to do this. Think not. */  
1262   /* DMA is done, just get all the receive buffers from the rx dma queue  
1263         and push them up to the higher layer protocol. Also free the desc  
1264         associated with the buffer. */  
1265   dle = iadev->rx_dle_q.read;  
1266   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1267   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1268   while(dle != cur_dle)  
1269   {  
1270       /* free the DMAed skb */  
1271       skb = skb_dequeue(&iadev->rx_dma_q);  
1272       if (!skb)  
1273          goto INCR_DLE;
1274       desc = ATM_DESC(skb);
1275       free_desc(dev, desc);  
1276                
1277       if (!(len = skb->len))
1278       {  
1279           printk("rx_dle_intr: skb len 0\n");  
1280           dev_kfree_skb_any(skb);  
1281       }  
1282       else  
1283       {  
1284           struct cpcs_trailer *trailer;
1285           u_short length;
1286           struct ia_vcc *ia_vcc;
1287
1288           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1289                 len, PCI_DMA_FROMDEVICE);
1290           /* no VCC related housekeeping done as yet. lets see */  
1291           vcc = ATM_SKB(skb)->vcc;
1292           if (!vcc) {
1293               printk("IA: null vcc\n");  
1294               dev_kfree_skb_any(skb);
1295               goto INCR_DLE;
1296           }
1297           ia_vcc = INPH_IA_VCC(vcc);
1298           if (ia_vcc == NULL)
1299           {
1300              atomic_inc(&vcc->stats->rx_err);
1301              dev_kfree_skb_any(skb);
1302              atm_return(vcc, atm_guess_pdu2truesize(len));
1303              goto INCR_DLE;
1304            }
1305           // get real pkt length  pwang_test
1306           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1307                                  skb->len - sizeof(*trailer));
1308           length = swap_byte_order(trailer->length);
1309           if ((length > iadev->rx_buf_sz) || (length > 
1310                               (skb->len - sizeof(struct cpcs_trailer))))
1311           {
1312              atomic_inc(&vcc->stats->rx_err);
1313              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1314                                                             length, skb->len);)
1315              dev_kfree_skb_any(skb);
1316              atm_return(vcc, atm_guess_pdu2truesize(len));
1317              goto INCR_DLE;
1318           }
1319           skb_trim(skb, length);
1320           
1321           /* Display the packet */  
1322           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1323           xdump(skb->data, skb->len, "RX: ");
1324           printk("\n");)
1325
1326           IF_RX(printk("rx_dle_intr: skb push");)  
1327           vcc->push(vcc,skb);  
1328           atomic_inc(&vcc->stats->rx);
1329           iadev->rx_pkt_cnt++;
1330       }  
1331 INCR_DLE:
1332       if (++dle == iadev->rx_dle_q.end)  
1333           dle = iadev->rx_dle_q.start;  
1334   }  
1335   iadev->rx_dle_q.read = dle;  
1336   
1337   /* if the interrupts are masked because there were no free desc available,  
1338                 unmask them now. */ 
1339   if (!iadev->rxing) {
1340      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1341      if (!(state & FREEQ_EMPTY)) {
1342         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1343         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1344                                       iadev->reass_reg+REASS_MASK_REG);
1345         iadev->rxing++; 
1346      }
1347   }
1348 }  
1349   
1350   
1351 static int open_rx(struct atm_vcc *vcc)  
1352 {  
1353         IADEV *iadev;  
1354         u_short __iomem *vc_table;  
1355         u_short __iomem *reass_ptr;  
1356         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1357
1358         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1359         iadev = INPH_IA_DEV(vcc->dev);  
1360         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1361            if (iadev->phy_type & FE_25MBIT_PHY) {
1362                printk("IA: ABR not supported\n");
1363                return -EINVAL; 
1364            }
1365         }
1366         /* Make only this VCI in the vc table valid and let all   
1367                 others be invalid entries */  
1368         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1369         vc_table += vcc->vci;
1370         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1371
1372         *vc_table = vcc->vci << 6;
1373         /* Also keep a list of open rx vcs so that we can attach them with  
1374                 incoming PDUs later. */  
1375         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1376                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1377         {  
1378                 srv_cls_param_t srv_p;
1379                 init_abr_vc(iadev, &srv_p);
1380                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1381         } 
1382         else {  /* for UBR  later may need to add CBR logic */
1383                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1384                 reass_ptr += vcc->vci;
1385                 *reass_ptr = NO_AAL5_PKT;
1386         }
1387         
1388         if (iadev->rx_open[vcc->vci])  
1389                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1390                         vcc->dev->number, vcc->vci);  
1391         iadev->rx_open[vcc->vci] = vcc;  
1392         return 0;  
1393 }  
1394   
1395 static int rx_init(struct atm_dev *dev)  
1396 {  
1397         IADEV *iadev;  
1398         struct rx_buf_desc __iomem *buf_desc_ptr;  
1399         unsigned long rx_pkt_start = 0;  
1400         void *dle_addr;  
1401         struct abr_vc_table  *abr_vc_table; 
1402         u16 *vc_table;  
1403         u16 *reass_table;  
1404         int i,j, vcsize_sel;  
1405         u_short freeq_st_adr;  
1406         u_short *freeq_start;  
1407   
1408         iadev = INPH_IA_DEV(dev);  
1409   //    spin_lock_init(&iadev->rx_lock); 
1410   
1411         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1412         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1413                                         &iadev->rx_dle_dma);  
1414         if (!dle_addr)  {  
1415                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1416                 goto err_out;
1417         }
1418         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1419         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1420         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1421         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1422         /* the end of the dle q points to the entry after the last  
1423         DLE that can be used. */  
1424   
1425         /* write the upper 20 bits of the start address to rx list address register */  
1426         /* We know this is 32bit bus addressed so the following is safe */
1427         writel(iadev->rx_dle_dma & 0xfffff000,
1428                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1429         IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1430                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1431                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1432         printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1433                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1434                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1435   
1436         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1437         writew(0, iadev->reass_reg+MODE_REG);  
1438         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1439   
1440         /* Receive side control memory map  
1441            -------------------------------  
1442   
1443                 Buffer descr    0x0000 (736 - 23K)  
1444                 VP Table        0x5c00 (256 - 512)  
1445                 Except q        0x5e00 (128 - 512)  
1446                 Free buffer q   0x6000 (1K - 2K)  
1447                 Packet comp q   0x6800 (1K - 2K)  
1448                 Reass Table     0x7000 (1K - 2K)  
1449                 VC Table        0x7800 (1K - 2K)  
1450                 ABR VC Table    0x8000 (1K - 32K)  
1451         */  
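        /* A worked example (assuming the 1K-VC board, i.e. memSize == 1):
           the Reassembly Table then starts at offset REASS_TABLE within
           reass_ram, and REASS_TABLE_BASE is programmed with (offset >> 3)
           further below; larger boards scale every offset by memSize. */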
1452           
1453         /* Base address for Buffer Descriptor Table */  
1454         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1455         /* Set the buffer size register */  
1456         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1457   
1458         /* Initialize each entry in the Buffer Descriptor Table */  
1459         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1460         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1461         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1462         buf_desc_ptr++;  
1463         rx_pkt_start = iadev->rx_pkt_ram;  
1464         for(i=1; i<=iadev->num_rx_desc; i++)  
1465         {  
1466                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1467                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1468                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1469                 buf_desc_ptr++;           
1470                 rx_pkt_start += iadev->rx_buf_sz;  
1471         }  
1472         IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1473         i = FREE_BUF_DESC_Q*iadev->memSize; 
1474         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1475         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1476         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1477                                          iadev->reass_reg+FREEQ_ED_ADR);
1478         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1479         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1480                                         iadev->reass_reg+FREEQ_WR_PTR);    
1481         /* Fill the FREEQ with all the free descriptors. */  
1482         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1483         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1484         for(i=1; i<=iadev->num_rx_desc; i++)  
1485         {  
1486                 *freeq_start = (u_short)i;  
1487                 freeq_start++;  
1488         }  
1489         IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1490         /* Packet Complete Queue */
1491         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1492         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1493         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1494         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1495         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1496
1497         /* Exception Queue */
1498         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1499         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1500         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1501                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1502         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1503         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1504  
1505         /* Load local copy of FREEQ and PCQ ptrs */
1506         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1507         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1508         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1509         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1510         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1511         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1512         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1513         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1514         
1515         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1516               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1517               iadev->rfL.pcq_wr);)                
1518         /* just for check - no VP TBL */  
1519         /* VP Table */  
1520         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1521         /* initialize VP Table for invalid VPIs  
1522                 - I guess we can write all 1s or 0x000f in the entire memory  
1523                   space or something similar.  
1524         */  
1525   
1526         /* This seems to work and looks right to me too !!! */  
1527         i =  REASS_TABLE * iadev->memSize;
1528         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1529         /* initialize every Reassembly table entry to NO_AAL5_PKT (no packet in progress) */  
1530         reass_table = (u16 *)(iadev->reass_ram+i);  
1531         j = REASS_TABLE_SZ * iadev->memSize;
1532         for(i=0; i < j; i++)  
1533                 *reass_table++ = NO_AAL5_PKT;  
1534        i = 8*1024;
1535        vcsize_sel =  0;
1536        while (i != iadev->num_vc) {
1537           i /= 2;
1538           vcsize_sel++;
1539        }
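       /* vcsize_sel ends up as log2(8192 / num_vc), e.g. 3 for a 1K-VC board
          and 1 for a 4K-VC board; it is OR-ed into the low bits of
          VC_LKUP_BASE below, which appears to encode the VC table size for
          the SAR. */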
1540        i = RX_VC_TABLE * iadev->memSize;
1541        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1542        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1543         j = RX_VC_TABLE_SZ * iadev->memSize;
1544         for(i = 0; i < j; i++)  
1545         {  
1546                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1547                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1548                 is those low 3 bits.   
1549                 Shall program this later.  
1550                 */  
1551                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1552                 vc_table++;  
1553         }  
1554         /* ABR VC table */
1555         i =  ABR_VC_TABLE * iadev->memSize;
1556         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1557                    
1558         i = ABR_VC_TABLE * iadev->memSize;
1559         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1560         j = REASS_TABLE_SZ * iadev->memSize;
1561         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1562         for(i = 0; i < j; i++) {                
1563                 abr_vc_table->rdf = 0x0003;
1564                 abr_vc_table->air = 0x5eb1;
1565                 abr_vc_table++;         
1566         }  
1567
1568         /* Initialize other registers */  
1569   
1570         /* VP Filter Register set for VC Reassembly only */  
1571         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1572         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1573         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1574
1575         /* Packet Timeout Count  related Registers : 
1576            Set packet timeout to occur in about 3 seconds
1577            Set Packet Aging Interval count register to overflow in about 4 us
1578         */  
1579         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1580
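        /* The next three lines appear to pack an aging-interval field and a
           timeout field (both derived from j, which still holds
           REASS_TABLE_SZ * memSize here) into the 16-bit TMOUT_RANGE
           register; the exact bit layout is hardware specific. */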
1581         i = (j >> 6) & 0xFF;
1582         j += 2 * (j - 1);
1583         i |= ((j << 2) & 0xFF00);
1584         writew(i, iadev->reass_reg+TMOUT_RANGE);
1585
1586         /* initiate the desc_tble */
1587         for(i=0; i<iadev->num_tx_desc;i++)
1588             iadev->desc_tbl[i].timestamp = 0;
1589
1590         /* to clear the interrupt status register - read it */  
1591         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1592   
1593         /* Mask Register - clear it */  
1594         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1595   
1596         skb_queue_head_init(&iadev->rx_dma_q);  
1597         iadev->rx_free_desc_qhead = NULL;   
1598
1599         iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1600         if (!iadev->rx_open) {
1601                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1602                 dev->number);  
1603                 goto err_free_dle;
1604         }  
1605
1606         iadev->rxing = 1;
1607         iadev->rx_pkt_cnt = 0;
1608         /* Mode Register */  
1609         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1610         return 0;  
1611
1612 err_free_dle:
1613         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1614                             iadev->rx_dle_dma);  
1615 err_out:
1616         return -ENOMEM;
1617 }  
1618   
1619
1620 /*  
1621         The memory map suggested in appendix A and the coding for it.   
1622         Keeping it around just in case we change our mind later.  
1623   
1624                 Buffer descr    0x0000 (128 - 4K)  
1625                 UBR sched       0x1000 (1K - 4K)  
1626                 UBR Wait q      0x2000 (1K - 4K)  
1627                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1628                                         (128 - 256) each  
1629                 extended VC     0x4000 (1K - 8K)  
1630                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1631                 CBR sched       0x7000 (as needed)  
1632                 VC table        0x8000 (1K - 32K)  
1633 */  
1634   
1635 static void tx_intr(struct atm_dev *dev)  
1636 {  
1637         IADEV *iadev;  
1638         unsigned short status;  
1639         unsigned long flags;
1640
1641         iadev = INPH_IA_DEV(dev);  
1642   
1643         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1644         if (status & TRANSMIT_DONE){
1645
1646            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1647            spin_lock_irqsave(&iadev->tx_lock, flags);
1648            ia_tx_poll(iadev);
1649            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1650            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1651            if (iadev->close_pending)  
1652                wake_up(&iadev->close_wait);
1653         }         
1654         if (status & TCQ_NOT_EMPTY)  
1655         {  
1656             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1657         }  
1658 }  
1659   
1660 static void tx_dle_intr(struct atm_dev *dev)
1661 {
1662         IADEV *iadev;
1663         struct dle *dle, *cur_dle; 
1664         struct sk_buff *skb;
1665         struct atm_vcc *vcc;
1666         struct ia_vcc  *iavcc;
1667         u_int dle_lp;
1668         unsigned long flags;
1669
1670         iadev = INPH_IA_DEV(dev);
1671         spin_lock_irqsave(&iadev->tx_lock, flags);   
1672         dle = iadev->tx_dle_q.read;
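        /* The TX list address register tracks how far the SAR has advanced
           through the DLE ring: masking with the ring size gives a byte
           offset, and >> 4 converts it to a DLE index (each DLE is 16 bytes
           here, as the shift implies), so the loop below reclaims every
           descriptor the hardware has already consumed. */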
1673         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1674                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1675         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1676         while (dle != cur_dle)
1677         {
1678             /* free the DMAed skb */ 
1679             skb = skb_dequeue(&iadev->tx_dma_q); 
1680             if (!skb) break;
1681
1682             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1683             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1684                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1685                                  PCI_DMA_TODEVICE);
1686             }
1687             vcc = ATM_SKB(skb)->vcc;
1688             if (!vcc) {
1689                   printk("tx_dle_intr: vcc is null\n");
1690                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1691                   dev_kfree_skb_any(skb);
1692
1693                   return;
1694             }
1695             iavcc = INPH_IA_VCC(vcc);
1696             if (!iavcc) {
1697                   printk("tx_dle_intr: iavcc is null\n");
1698                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1699                   dev_kfree_skb_any(skb);
1700                   return;
1701             }
1702             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1703                if ((vcc->pop) && (skb->len != 0))
1704                {     
1705                  vcc->pop(vcc, skb);
1706                } 
1707                else {
1708                  dev_kfree_skb_any(skb);
1709                }
1710             }
1711             else { /* Hold the rate-limited skb for flow control */
1712                IA_SKB_STATE(skb) |= IA_DLED;
1713                skb_queue_tail(&iavcc->txing_skb, skb);
1714             }
1715             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
1716             if (++dle == iadev->tx_dle_q.end)
1717                  dle = iadev->tx_dle_q.start;
1718         }
1719         iadev->tx_dle_q.read = dle;
1720         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1721 }
1722   
1723 static int open_tx(struct atm_vcc *vcc)  
1724 {  
1725         struct ia_vcc *ia_vcc;  
1726         IADEV *iadev;  
1727         struct main_vc *vc;  
1728         struct ext_vc *evc;  
1729         int ret;
1730         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1731         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1732         iadev = INPH_IA_DEV(vcc->dev);  
1733         
1734         if (iadev->phy_type & FE_25MBIT_PHY) {
1735            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1736                printk("IA:  ABR not supported\n");
1737                return -EINVAL; 
1738            }
1739           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1740                printk("IA:  CBR not supported\n");
1741                return -EINVAL; 
1742           }
1743         }
1744         ia_vcc =  INPH_IA_VCC(vcc);
1745         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1746         if (vcc->qos.txtp.max_sdu > 
1747                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1748            printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1749                   vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1750            vcc->dev_data = NULL;
1751            kfree(ia_vcc);
1752            return -EINVAL; 
1753         }
1754         ia_vcc->vc_desc_cnt = 0;
1755         ia_vcc->txing = 1;
1756
1757         /* find pcr */
1758         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1759            vcc->qos.txtp.pcr = iadev->LineRate;
1760         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1761            vcc->qos.txtp.pcr = iadev->LineRate;
1762         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1763            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1764         if (vcc->qos.txtp.pcr > iadev->LineRate)
1765              vcc->qos.txtp.pcr = iadev->LineRate;
1766         ia_vcc->pcr = vcc->qos.txtp.pcr;
1767
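        /* ltimeout scales the TX drain timeout with the VC's cell rate:
              pcr >  LineRate/6    ->  HZ/10
              pcr >  LineRate/130  ->  HZ
              pcr <= 170           ->  16*HZ
              otherwise            ->  2700*HZ/pcr                        */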
1768         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1769         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1770         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1771         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1772         if (ia_vcc->pcr < iadev->rate_limit)
1773            skb_queue_head_init (&ia_vcc->txing_skb);
1774         if (ia_vcc->pcr < iadev->rate_limit) {
1775            struct sock *sk = sk_atm(vcc);
1776
1777            if (vcc->qos.txtp.max_sdu != 0) {
1778                if (ia_vcc->pcr > 60000)
1779                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1780                else if (ia_vcc->pcr > 2000)
1781                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1782                else
1783                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1784            }
1785            else
1786              sk->sk_sndbuf = 24576;
1787         }
1788            
1789         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1790         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1791         vc += vcc->vci;  
1792         evc += vcc->vci;  
1793         memset((caddr_t)vc, 0, sizeof(*vc));  
1794         memset((caddr_t)evc, 0, sizeof(*evc));  
1795           
1796         /* store the most significant 4 bits of vci as the last 4 bits   
1797                 of first part of atm header.  
1798            store the last 12 bits of vci as first 12 bits of the second  
1799                 part of the atm header.  
1800         */  
1801         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1802         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
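        /* Worked example with a hypothetical vci of 0x1234:
           atm_hdr1 = 0x0001 (top 4 VCI bits in the low nibble) and
           atm_hdr2 = 0x2340 (remaining 12 bits in the upper 12 bits). */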
1803  
1804         /* check the following for different traffic classes */  
1805         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1806         {  
1807                 vc->type = UBR;  
1808                 vc->status = CRC_APPEND;
1809                 vc->acr = cellrate_to_float(iadev->LineRate);  
1810                 if (vcc->qos.txtp.pcr > 0) 
1811                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1812                 IF_UBR(printk("UBR: txtp.max_pcr = 0x%x f_rate = 0x%x\n", 
1813                                              vcc->qos.txtp.max_pcr,vc->acr);)
1814         }  
1815         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1816         {       srv_cls_param_t srv_p;
1817                 IF_ABR(printk("Tx ABR VCC\n");)  
1818                 init_abr_vc(iadev, &srv_p);
1819                 if (vcc->qos.txtp.pcr > 0) 
1820                    srv_p.pcr = vcc->qos.txtp.pcr;
1821                 if (vcc->qos.txtp.min_pcr > 0) {
1822                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1823                    if (tmpsum > iadev->LineRate)
1824                        return -EBUSY;
1825                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1826                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1827                 } 
1828                 else srv_p.mcr = 0;
1829                 if (vcc->qos.txtp.icr)
1830                    srv_p.icr = vcc->qos.txtp.icr;
1831                 if (vcc->qos.txtp.tbe)
1832                    srv_p.tbe = vcc->qos.txtp.tbe;
1833                 if (vcc->qos.txtp.frtt)
1834                    srv_p.frtt = vcc->qos.txtp.frtt;
1835                 if (vcc->qos.txtp.rif)
1836                    srv_p.rif = vcc->qos.txtp.rif;
1837                 if (vcc->qos.txtp.rdf)
1838                    srv_p.rdf = vcc->qos.txtp.rdf;
1839                 if (vcc->qos.txtp.nrm_pres)
1840                    srv_p.nrm = vcc->qos.txtp.nrm;
1841                 if (vcc->qos.txtp.trm_pres)
1842                    srv_p.trm = vcc->qos.txtp.trm;
1843                 if (vcc->qos.txtp.adtf_pres)
1844                    srv_p.adtf = vcc->qos.txtp.adtf;
1845                 if (vcc->qos.txtp.cdf_pres)
1846                    srv_p.cdf = vcc->qos.txtp.cdf;    
1847                 if (srv_p.icr > srv_p.pcr)
1848                    srv_p.icr = srv_p.pcr;    
1849                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1850                                                       srv_p.pcr, srv_p.mcr);)
1851                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1852         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1853                 if (iadev->phy_type & FE_25MBIT_PHY) {
1854                     printk("IA:  CBR not supported\n");
1855                     return -EINVAL; 
1856                 }
1857                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1858                    IF_CBR(printk("PCR is not available\n");)
1859                    return -1;
1860                 }
1861                 vc->type = CBR;
1862                 vc->status = CRC_APPEND;
1863                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1864                     return ret;
1865                 }
1866        } 
1867         else  
1868            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1869         
1870         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1871         IF_EVENT(printk("ia open_tx returning \n");)  
1872         return 0;  
1873 }  
1874   
1875   
1876 static int tx_init(struct atm_dev *dev)  
1877 {  
1878         IADEV *iadev;  
1879         struct tx_buf_desc *buf_desc_ptr;
1880         unsigned int tx_pkt_start;  
1881         void *dle_addr;  
1882         int i;  
1883         u_short tcq_st_adr;  
1884         u_short *tcq_start;  
1885         u_short prq_st_adr;  
1886         u_short *prq_start;  
1887         struct main_vc *vc;  
1888         struct ext_vc *evc;   
1889         u_short tmp16;
1890         u32 vcsize_sel;
1891  
1892         iadev = INPH_IA_DEV(dev);  
1893         spin_lock_init(&iadev->tx_lock);
1894  
1895         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1896                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1897
1898         /* Allocate 4k (boundary aligned) bytes */
1899         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1900                                         &iadev->tx_dle_dma);  
1901         if (!dle_addr)  {
1902                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1903                 goto err_out;
1904         }
1905         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1906         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1907         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1908         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1909
1910         /* write the upper 20 bits of the start address to tx list address register */  
1911         writel(iadev->tx_dle_dma & 0xfffff000,
1912                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1913         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1914         writew(0, iadev->seg_reg+MODE_REG_0);  
1915         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1916         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1917         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1918         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1919   
1920         /*  
1921            Transmit side control memory map  
1922            --------------------------------    
1923          Buffer descr   0x0000 (128 - 4K)  
1924          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1925                                         (512 - 1K) each  
1926                                         TCQ - 4K, PRQ - 5K  
1927          CBR Table      0x1800 (as needed) - 6K  
1928          UBR Table      0x3000 (1K - 4K) - 12K  
1929          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1930          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1931                                 ABR Tbl - 20K, ABR Wq - 22K   
1932          extended VC    0x6000 (1K - 8K) - 24K  
1933          VC Table       0x8000 (1K - 32K) - 32K  
1934           
1935         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1936         and Wait q, which can be allotted later.  
1937         */  
1938      
1939         /* Buffer Descriptor Table Base address */  
1940         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1941   
1942         /* initialize each entry in the buffer descriptor table */  
1943         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1944         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1945         buf_desc_ptr++;  
1946         tx_pkt_start = TX_PACKET_RAM;  
1947         for(i=1; i<=iadev->num_tx_desc; i++)  
1948         {  
1949                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1950                 buf_desc_ptr->desc_mode = AAL5;  
1951                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1952                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1953                 buf_desc_ptr++;           
1954                 tx_pkt_start += iadev->tx_buf_sz;  
1955         }  
1956         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1957         if (!iadev->tx_buf) {
1958             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1959             goto err_free_dle;
1960         }
1961         for (i= 0; i< iadev->num_tx_desc; i++)
1962         {
1963             struct cpcs_trailer *cpcs;
1964  
1965             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1966             if(!cpcs) {                
1967                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1968                 goto err_free_tx_bufs;
1969             }
1970             iadev->tx_buf[i].cpcs = cpcs;
1971             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1972                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1973         }
1974         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1975                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1976         if (!iadev->desc_tbl) {
1977                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1978                 goto err_free_all_tx_bufs;
1979         }
1980   
1981         /* Communication Queues base address */  
1982         i = TX_COMP_Q * iadev->memSize;
1983         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1984   
1985         /* Transmit Complete Queue */  
1986         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1987         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1988         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1989         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1990         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1991                                               iadev->seg_reg+TCQ_ED_ADR); 
1992         /* Fill the TCQ with all the free descriptors. */  
1993         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1994         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
1995         for(i=1; i<=iadev->num_tx_desc; i++)  
1996         {  
1997                 *tcq_start = (u_short)i;  
1998                 tcq_start++;  
1999         }  
2000   
2001         /* Packet Ready Queue */  
2002         i = PKT_RDY_Q * iadev->memSize; 
2003         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2004         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2005                                               iadev->seg_reg+PRQ_ED_ADR);
2006         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2007         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2008          
2009         /* Load local copy of PRQ and TCQ ptrs */
2010         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2011         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2012         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2013
2014         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2015         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2016         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2017
2018         /* Just for safety initializing the queue to have desc 1 always */  
2019         /* Fill the PRQ with all the free descriptors. */  
2020         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2021         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2022         for(i=1; i<=iadev->num_tx_desc; i++)  
2023         {  
2024                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2025                 prq_start++;  
2026         }  
2027         /* CBR Table */  
2028         IF_INIT(printk("Start CBR Init\n");)
2029 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2030         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2031 #else /* Charlie's logic is wrong ? */
2032         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2033         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2034         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2035 #endif
2036
2037         IF_INIT(printk("value in register = 0x%x\n",
2038                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2039         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2040         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2041         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2042                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2043         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2044         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2045         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2046         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2047                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2048         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2049           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2050           readw(iadev->seg_reg+CBR_TAB_END+1));)
2051
2052         /* Initialize the CBR Scheduling Table */
2053         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2054                                                           0, iadev->num_vc*6); 
2055         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2056         iadev->CbrEntryPt = 0;
2057         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2058         iadev->NumEnabledCBR = 0;
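        /* Each CBR schedule slot is a 16-bit entry, so the num_vc*3 slots
           (CbrTotEntries) occupy the num_vc*6 bytes zeroed above;
           Granularity is MAX_ATM_155 spread evenly across those slots. */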
2059
2060         /* UBR scheduling Table and wait queue */  
2061         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2062                 - SCHEDSZ is 1K (# of entries).  
2063                 - UBR Table size is 4K  
2064                 - UBR wait queue is 4K  
2065            since the table and wait queues are contiguous, all the bytes   
2066            can be initialized by one memset.  
2067         */  
2068         
2069         vcsize_sel = 0;
2070         i = 8*1024;
2071         while (i != iadev->num_vc) {
2072           i /= 2;
2073           vcsize_sel++;
2074         }
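        /* Same VC-size selector as in rx_init: vcsize_sel = log2(8192 / num_vc). */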
2075  
2076         i = MAIN_VC_TABLE * iadev->memSize;
2077         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2078         i =  EXT_VC_TABLE * iadev->memSize;
2079         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2080         i = UBR_SCHED_TABLE * iadev->memSize;
2081         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2082         i = UBR_WAIT_Q * iadev->memSize; 
2083         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2084         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2085                                                        0, iadev->num_vc*8);
2086         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2087         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2088                 - SCHEDSZ is 1K (# of entries).  
2089                 - ABR Table size is 2K  
2090                 - ABR wait queue is 2K  
2091            since the table and wait queues are contiguous, all the bytes   
2092            can be initialized by one memset.  
2093         */  
2094         i = ABR_SCHED_TABLE * iadev->memSize;
2095         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2096         i = ABR_WAIT_Q * iadev->memSize;
2097         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2098  
2099         i = ABR_SCHED_TABLE*iadev->memSize;
2100         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2101         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2102         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2103         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2104         if (!iadev->testTable) {
2105            printk("Get free page failed\n");
2106            goto err_free_desc_tbl;
2107         }
2108         for(i=0; i<iadev->num_vc; i++)  
2109         {  
2110                 memset((caddr_t)vc, 0, sizeof(*vc));  
2111                 memset((caddr_t)evc, 0, sizeof(*evc));  
2112                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2113                                                 GFP_KERNEL);
2114                 if (!iadev->testTable[i])
2115                         goto err_free_test_tables;
2116                 iadev->testTable[i]->lastTime = 0;
2117                 iadev->testTable[i]->fract = 0;
2118                 iadev->testTable[i]->vc_status = VC_UBR;
2119                 vc++;  
2120                 evc++;  
2121         }  
2122   
2123         /* Other Initialization */  
2124           
2125         /* Max Rate Register */  
2126         if (iadev->phy_type & FE_25MBIT_PHY) {
2127            writew(RATE25, iadev->seg_reg+MAXRATE);  
2128            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2129         }
2130         else {
2131            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2132            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2133         }
2134         /* Set Idle Header Registers to be sure */  
2135         writew(0, iadev->seg_reg+IDLEHEADHI);  
2136         writew(0, iadev->seg_reg+IDLEHEADLO);  
2137   
2138         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2139         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2140
2141         iadev->close_pending = 0;
2142         init_waitqueue_head(&iadev->close_wait);
2143         init_waitqueue_head(&iadev->timeout_wait);
2144         skb_queue_head_init(&iadev->tx_dma_q);  
2145         ia_init_rtn_q(&iadev->tx_return_q);  
2146
2147         /* RM Cell Protocol ID and Message Type */  
2148         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2149         skb_queue_head_init (&iadev->tx_backlog);
2150   
2151         /* Mode Register 1 */  
2152         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2153   
2154         /* Mode Register 0 */  
2155         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2156   
2157         /* Interrupt Status Register - read to clear */  
2158         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2159   
2160         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2161         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2162         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2163         iadev->tx_pkt_cnt = 0;
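        /* VCs whose pcr falls below this limit are flow-controlled in
           software: tx_dle_intr parks their skbs on txing_skb instead of
           popping them right away. */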
2164         iadev->rate_limit = iadev->LineRate / 3;
2165   
2166         return 0;
2167
2168 err_free_test_tables:
2169         while (--i >= 0)
2170                 kfree(iadev->testTable[i]);
2171         kfree(iadev->testTable);
2172 err_free_desc_tbl:
2173         kfree(iadev->desc_tbl);
2174 err_free_all_tx_bufs:
2175         i = iadev->num_tx_desc;
2176 err_free_tx_bufs:
2177         while (--i >= 0) {
2178                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2179
2180                 pci_unmap_single(iadev->pci, desc->dma_addr,
2181                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2182                 kfree(desc->cpcs);
2183         }
2184         kfree(iadev->tx_buf);
2185 err_free_dle:
2186         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2187                             iadev->tx_dle_dma);  
2188 err_out:
2189         return -ENOMEM;
2190 }   
2191    
2192 static irqreturn_t ia_int(int irq, void *dev_id)  
2193 {  
2194    struct atm_dev *dev;  
2195    IADEV *iadev;  
2196    unsigned int status;  
2197    int handled = 0;
2198
2199    dev = dev_id;  
2200    iadev = INPH_IA_DEV(dev);  
2201    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2202    { 
2203         handled = 1;
2204         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2205         if (status & STAT_REASSINT)  
2206         {  
2207            /* do something */  
2208            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2209            rx_intr(dev);  
2210         }  
2211         if (status & STAT_DLERINT)  
2212         {  
2213            /* Clear this bit by writing a 1 to it. */  
2214            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2215            rx_dle_intr(dev);  
2216         }  
2217         if (status & STAT_SEGINT)  
2218         {  
2219            /* do something */ 
2220            IF_EVENT(printk("IA: tx_intr \n");) 
2221            tx_intr(dev);  
2222         }  
2223         if (status & STAT_DLETINT)  
2224         {  
2225            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2226            tx_dle_intr(dev);  
2227         }  
2228         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2229         {  
2230            if (status & STAT_FEINT) 
2231                IaFrontEndIntr(iadev);
2232         }  
2233    }
2234    return IRQ_RETVAL(handled);
2235 }  
2236           
2237           
2238           
2239 /*----------------------------- entries --------------------------------*/  
2240 static int get_esi(struct atm_dev *dev)  
2241 {  
2242         IADEV *iadev;  
2243         int i;  
2244         u32 mac1;  
2245         u16 mac2;  
2246           
2247         iadev = INPH_IA_DEV(dev);  
2248         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2249                                 iadev->reg+IPHASE5575_MAC1)));  
2250         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2251         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
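        /* The 6-byte ESI is MAC1 (4 bytes) followed by MAC2 (2 bytes),
           most significant byte first. */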
2252         for (i=0; i<MAC1_LEN; i++)  
2253                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2254           
2255         for (i=0; i<MAC2_LEN; i++)  
2256                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2257         return 0;  
2258 }  
2259           
2260 static int reset_sar(struct atm_dev *dev)  
2261 {  
2262         IADEV *iadev;  
2263         int i, error = 1;  
2264         unsigned int pci[64];  
2265           
2266         iadev = INPH_IA_DEV(dev);  
2267         for(i=0; i<64; i++)  
2268           if ((error = pci_read_config_dword(iadev->pci,  
2269                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2270               return error;  
2271         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2272         for(i=0; i<64; i++)  
2273           if ((error = pci_write_config_dword(iadev->pci,  
2274                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2275             return error;  
2276         udelay(5);  
2277         return 0;  
2278 }  
2279           
2280           
2281 static int __devinit ia_init(struct atm_dev *dev)
2282 {  
2283         IADEV *iadev;  
2284         unsigned long real_base;
2285         void __iomem *base;
2286         unsigned short command;  
2287         int error, i; 
2288           
2289         /* The device has been identified and registered. Now we read   
2290            necessary configuration info like memory base address,   
2291            interrupt number etc */  
2292           
2293         IF_INIT(printk(">ia_init\n");)  
2294         dev->ci_range.vpi_bits = 0;  
2295         dev->ci_range.vci_bits = NR_VCI_LD;  
2296
2297         iadev = INPH_IA_DEV(dev);  
2298         real_base = pci_resource_start (iadev->pci, 0);
2299         iadev->irq = iadev->pci->irq;
2300                   
2301         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2302         if (error) {
2303                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2304                                 dev->number,error);  
2305                 return -EINVAL;  
2306         }  
2307         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2308                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2309           
2310         /* find mapping size of board */  
2311           
2312         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2313
2314         if (iadev->pci_map_size == 0x100000){
2315           iadev->num_vc = 4096;
2316           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2317           iadev->memSize = 4;
2318         }
2319         else if (iadev->pci_map_size == 0x40000) {
2320           iadev->num_vc = 1024;
2321           iadev->memSize = 1;
2322         }
2323         else {
2324            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2325            return -EINVAL;
2326         }
2327         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2328           
2329         /* enable bus mastering */
2330         pci_set_master(iadev->pci);
2331
2332         /*  
2333          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2334          */  
2335         udelay(10);  
2336           
2337         /* mapping the physical address to a virtual address in address space */  
2338         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2339           
2340         if (!base)  
2341         {  
2342                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2343                             dev->number);  
2344                 return error;  
2345         }  
2346         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2347                         dev->number, iadev->pci->revision, base, iadev->irq);)
2348           
2349         /* filling the iphase dev structure */  
2350         iadev->mem = iadev->pci_map_size /2;  
2351         iadev->real_base = real_base;  
2352         iadev->base = base;  
2353                   
2354         /* Bus Interface Control Registers */  
2355         iadev->reg = base + REG_BASE;
2356         /* Segmentation Control Registers */  
2357         iadev->seg_reg = base + SEG_BASE;
2358         /* Reassembly Control Registers */  
2359         iadev->reass_reg = base + REASS_BASE;  
2360         /* Front end/ DMA control registers */  
2361         iadev->phy = base + PHY_BASE;  
2362         iadev->dma = base + PHY_BASE;  
2363         /* RAM - Segmentation RAm and Reassembly RAM */  
2364         iadev->ram = base + ACTUAL_RAM_BASE;  
2365         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2366         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2367   
2368         /* let's print out the above */  
2369         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2370           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2371           iadev->phy, iadev->ram, iadev->seg_ram, 
2372           iadev->reass_ram);) 
2373           
2374         /* let's try reading the MAC address */  
2375         error = get_esi(dev);  
2376         if (error) {
2377           iounmap(iadev->base);
2378           return error;  
2379         }
2380         printk("IA: ");
2381         for (i=0; i < ESI_LEN; i++)  
2382                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2383         printk("\n");  
2384   
2385         /* reset SAR */  
2386         if (reset_sar(dev)) {
2387            iounmap(iadev->base);
2388            printk("IA: reset SAR failed, please try again\n");
2389            return 1;
2390         }
2391         return 0;  
2392 }  
2393
2394 static void ia_update_stats(IADEV *iadev) {
2395     if (!iadev->carrier_detect)
2396         return;
2397     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2398     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2399     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2400     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2401     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2402     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2403     return;
2404 }
2405   
2406 static void ia_led_timer(unsigned long arg) {
2407         unsigned long flags;
2408         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2409         u_char i;
2410         static u32 ctrl_reg; 
2411         for (i = 0; i < iadev_count; i++) {
2412            if (ia_dev[i]) {
2413               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2414               if (blinking[i] == 0) {
2415                  blinking[i]++;
2416                  ctrl_reg &= (~CTRL_LED);
2417                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2418                  ia_update_stats(ia_dev[i]);
2419               }
2420               else {
2421                  blinking[i] = 0;
2422                  ctrl_reg |= CTRL_LED;
2423                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2424                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2425                  if (ia_dev[i]->close_pending)  
2426                     wake_up(&ia_dev[i]->close_wait);
2427                  ia_tx_poll(ia_dev[i]);
2428                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2429               }
2430            }
2431         }
2432         mod_timer(&ia_timer, jiffies + HZ / 4);
2433         return;
2434 }
2435
2436 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2437         unsigned long addr)  
2438 {  
2439         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2440 }  
2441   
2442 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2443 {  
2444         return readl(INPH_IA_DEV(dev)->phy+addr);  
2445 }  
2446
2447 static void ia_free_tx(IADEV *iadev)
2448 {
2449         int i;
2450
2451         kfree(iadev->desc_tbl);
2452         for (i = 0; i < iadev->num_vc; i++)
2453                 kfree(iadev->testTable[i]);
2454         kfree(iadev->testTable);
2455         for (i = 0; i < iadev->num_tx_desc; i++) {
2456                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2457
2458                 pci_unmap_single(iadev->pci, desc->dma_addr,
2459                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2460                 kfree(desc->cpcs);
2461         }
2462         kfree(iadev->tx_buf);
2463         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2464                             iadev->tx_dle_dma);  
2465 }
2466
2467 static void ia_free_rx(IADEV *iadev)
2468 {
2469         kfree(iadev->rx_open);
2470         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2471                           iadev->rx_dle_dma);  
2472 }
2473
2474 static int __devinit ia_start(struct atm_dev *dev)
2475 {  
2476         IADEV *iadev;  
2477         int error;  
2478         unsigned char phy;  
2479         u32 ctrl_reg;  
2480         IF_EVENT(printk(">ia_start\n");)  
2481         iadev = INPH_IA_DEV(dev);  
2482         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2483                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2484                     dev->number, iadev->irq);  
2485                 error = -EAGAIN;
2486                 goto err_out;
2487         }  
2488         /* @@@ should release IRQ on error */  
2489         /* enabling memory + master */  
2490         if ((error = pci_write_config_word(iadev->pci,   
2491                                 PCI_COMMAND,   
2492                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2493         {  
2494                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2495                     "master (0x%x)\n",dev->number, error);  
2496                 error = -EIO;  
2497                 goto err_free_irq;
2498         }  
2499         udelay(10);  
2500   
2501         /* Maybe we should reset the front end, initialize Bus Interface Control   
2502                 Registers and see. */  
2503   
2504         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2505                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2506         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2507         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2508                         | CTRL_B8  
2509                         | CTRL_B16  
2510                         | CTRL_B32  
2511                         | CTRL_B48  
2512                         | CTRL_B64  
2513                         | CTRL_B128  
2514                         | CTRL_ERRMASK  
2515                         | CTRL_DLETMASK         /* should be removed later */  
2516                         | CTRL_DLERMASK  
2517                         | CTRL_SEGMASK  
2518                         | CTRL_REASSMASK          
2519                         | CTRL_FEMASK  
2520                         | CTRL_CSPREEMPT;  
2521   
2522        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2523   
2524         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2525                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2526            printk("Bus status reg after init: %08x\n", 
2527                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2528     
2529         ia_hw_type(iadev); 
2530         error = tx_init(dev);  
2531         if (error)
2532                 goto err_free_irq;
2533         error = rx_init(dev);  
2534         if (error)
2535                 goto err_free_tx;
2536   
2537         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2538         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2539         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2540                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2541         phy = 0; /* resolve compiler complaint */
2542         IF_INIT ( 
2543         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2544                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2545         else  
2546                 printk("IA: utopia,rev.%0x\n",phy);) 
2547
2548         if (iadev->phy_type &  FE_25MBIT_PHY)
2549            ia_mb25_init(iadev);
2550         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2551            ia_suni_pm7345_init(iadev);
2552         else {
2553                 error = suni_init(dev);
2554                 if (error)
2555                         goto err_free_rx;
2556                 if (dev->phy->start) {
2557                         error = dev->phy->start(dev);
2558                         if (error)
2559                                 goto err_free_rx;
2560                 }
2561                 /* Get iadev->carrier_detect status */
2562                 IaFrontEndIntr(iadev);
2563         }
2564         return 0;
2565
2566 err_free_rx:
2567         ia_free_rx(iadev);
2568 err_free_tx:
2569         ia_free_tx(iadev);
2570 err_free_irq:
2571         free_irq(iadev->irq, dev);  
2572 err_out:
2573         return error;
2574 }  
2575   
2576 static void ia_close(struct atm_vcc *vcc)  
2577 {
2578         DEFINE_WAIT(wait);
2579         u16 *vc_table;
2580         IADEV *iadev;
2581         struct ia_vcc *ia_vcc;
2582         struct sk_buff *skb = NULL;
2583         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2584         unsigned long closetime, flags;
2585
2586         iadev = INPH_IA_DEV(vcc->dev);
2587         ia_vcc = INPH_IA_VCC(vcc);
2588         if (!ia_vcc) return;  
2589
2590         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2591                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2592         clear_bit(ATM_VF_READY,&vcc->flags);
2593         skb_queue_head_init (&tmp_tx_backlog);
2594         skb_queue_head_init (&tmp_vcc_backlog); 
2595         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2596            iadev->close_pending++;
2597            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2598            schedule_timeout(50);
2599            finish_wait(&iadev->timeout_wait, &wait);
2600            spin_lock_irqsave(&iadev->tx_lock, flags); 
2601            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2602               if (ATM_SKB(skb)->vcc == vcc){ 
2603                  if (vcc->pop) vcc->pop(vcc, skb);
2604                  else dev_kfree_skb_any(skb);
2605               }
2606               else 
2607                  skb_queue_tail(&tmp_tx_backlog, skb);
2608            } 
2609            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2610              skb_queue_tail(&iadev->tx_backlog, skb);
2611            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2612            closetime = 300000 / ia_vcc->pcr;
2613            if (closetime == 0)
2614               closetime = 1;
2615            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2616            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2617            spin_lock_irqsave(&iadev->tx_lock, flags);
2618            iadev->close_pending--;
2619            iadev->testTable[vcc->vci]->lastTime = 0;
2620            iadev->testTable[vcc->vci]->fract = 0; 
2621            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2622            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2623               if (vcc->qos.txtp.min_pcr > 0)
2624                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2625            }
2626            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2627               ia_vcc = INPH_IA_VCC(vcc); 
2628               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2629               ia_cbrVc_close (vcc);
2630            }
2631            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2632         }
2633         
2634         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2635            // reset reass table
2636            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2637            vc_table += vcc->vci; 
2638            *vc_table = NO_AAL5_PKT;
2639            // reset vc table
2640            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2641            vc_table += vcc->vci;
2642            *vc_table = (vcc->vci << 6) | 15;
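           /* the low nibble value 15 marks the VCI invalid again, matching
              the initial value rx_init wrote into this table */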
2643            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2644               struct abr_vc_table __iomem *abr_vc_table = 
2645                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2646               abr_vc_table +=  vcc->vci;
2647               abr_vc_table->rdf = 0x0003;
2648               abr_vc_table->air = 0x5eb1;
2649            }                                 
2650            // Drain the packets
2651            rx_dle_intr(vcc->dev); 
2652            iadev->rx_open[vcc->vci] = NULL;
2653         }
2654         kfree(INPH_IA_VCC(vcc));  
2655         ia_vcc = NULL;
2656         vcc->dev_data = NULL;
2657         clear_bit(ATM_VF_ADDR,&vcc->flags);
2658         return;        
2659 }  
2660   
2661 static int ia_open(struct atm_vcc *vcc)
2662 {  
2663         IADEV *iadev;  
2664         struct ia_vcc *ia_vcc;  
2665         int error;  
2666         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2667         {  
2668                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2669                 vcc->dev_data = NULL;
2670         }  
2671         iadev = INPH_IA_DEV(vcc->dev);  
2672         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2673         {  
2674                 IF_EVENT(printk("iphase open: unspec part\n");)  
2675                 set_bit(ATM_VF_ADDR,&vcc->flags);
2676         }  
2677         if (vcc->qos.aal != ATM_AAL5)  
2678                 return -EINVAL;  
2679         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2680                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2681   
2682         /* Device dependent initialization */  
2683         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2684         if (!ia_vcc) return -ENOMEM;  
2685         vcc->dev_data = ia_vcc;
2686   
2687         if ((error = open_rx(vcc)))  
2688         {  
2689                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2690                 ia_close(vcc);  
2691                 return error;  
2692         }  
2693   
2694         if ((error = open_tx(vcc)))  
2695         {  
2696                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2697                 ia_close(vcc);  
2698                 return error;  
2699         }  
2700   
2701         set_bit(ATM_VF_READY,&vcc->flags);
2702
2703 #if 0
2704         {
2705            static u8 first = 1; 
2706            if (first) {
2707               ia_timer.expires = jiffies + 3*HZ;
2708               add_timer(&ia_timer);
2709               first = 0;
2710            }           
2711         }
2712 #endif
2713         IF_EVENT(printk("ia open returning\n");)  
2714         return 0;  
2715 }  
2716   
2717 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2718 {  
2719         IF_EVENT(printk(">ia_change_qos\n");)  
2720         return 0;  
2721 }  
2722   
2723 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2724 {  
2725    IA_CMDBUF ia_cmds;
2726    IADEV *iadev;
2727    int i, board;
2728    u16 __user *tmps;
2729    IF_EVENT(printk(">ia_ioctl\n");)  
2730    if (cmd != IA_CMD) {
2731       if (!dev->phy->ioctl) return -EINVAL;
2732       return dev->phy->ioctl(dev,cmd,arg);
2733    }
2734    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2735    board = ia_cmds.status;
2736    if ((board < 0) || (board >= iadev_count))
2737          board = 0;    
2738    iadev = ia_dev[board];
2739    switch (ia_cmds.cmd) {
2740    case MEMDUMP:
2741    {
2742         switch (ia_cmds.sub_cmd) {
2743           case MEMDUMP_DEV:     
2744              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2745              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2746                 return -EFAULT;
2747              ia_cmds.status = 0;
2748              break;
2749           case MEMDUMP_SEGREG:
2750              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2751              tmps = (u16 __user *)ia_cmds.buf;
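             /* dump the first 0x80 bytes of the segmentation register
                window, copying the low 16 bits of each halfword-aligned
                read out to the user buffer */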
2752              for(i=0; i<0x80; i+=2, tmps++)
2753                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2754              ia_cmds.status = 0;
2755              ia_cmds.len = 0x80;
2756              break;
2757           case MEMDUMP_REASSREG:
2758              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2759              tmps = (u16 __user *)ia_cmds.buf;
2760              for(i=0; i<0x80; i+=2, tmps++)
2761                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2762              ia_cmds.status = 0;
2763              ia_cmds.len = 0x80;
2764              break;
2765           case MEMDUMP_FFL:
2766           {  
2767              ia_regs_t       *regs_local;
2768              ffredn_t        *ffL;
2769              rfredn_t        *rfL;
2770                      
2771              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2772              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2773              if (!regs_local) return -ENOMEM;
2774              ffL = &regs_local->ffredn;
2775              rfL = &regs_local->rfredn;
2776              /* Copy real rfred registers into the local copy */
2777              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2778                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2779              /* Copy real ffred registers into the local copy */
2780              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2781                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2782
2783              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2784                 kfree(regs_local);
2785                 return -EFAULT;
2786              }
2787              kfree(regs_local);
2788              printk("Board %d registers dumped\n", board);
2789              ia_cmds.status = 0;                  
2790          }      
2791              break;        
2792          case READ_REG:
2793          {  
2794              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2795              desc_dbg(iadev); 
2796              ia_cmds.status = 0; 
2797          }
2798              break;
2799          case 0x6:
2800          {  
2801              ia_cmds.status = 0; 
2802              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2803              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2804          }
2805              break;
2806          case 0x8:
2807          {
2808              struct k_sonet_stats *stats;
2809              stats = &PRIV(_ia_dev[board])->sonet_stats;
2810              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2811              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2812              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2813              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2814              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2815              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2816              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2817              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2818              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2819          }
2820             ia_cmds.status = 0;
2821             break;
2822          case 0x9:
2823             if (!capable(CAP_NET_ADMIN)) return -EPERM;
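            /* restart reception: return every RX descriptor to the free
               queue, rewrite the reassembly interrupt mask and flag the
               adapter as receiving again */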
2824             for (i = 1; i <= iadev->num_rx_desc; i++)
2825                free_desc(_ia_dev[board], i);
2826             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2827                                             iadev->reass_reg+REASS_MASK_REG);
2828             iadev->rxing = 1;
2829             
2830             ia_cmds.status = 0;
2831             break;
2832
2833          case 0xb:
2834             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2835             IaFrontEndIntr(iadev);
2836             break;
2837          case 0xa:
2838             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2839          {  
2840              ia_cmds.status = 0; 
2841              IADebugFlag = ia_cmds.maddr;
2842              printk("New debug option loaded\n");
2843          }
2844              break;
2845          default:
2846              ia_cmds.status = 0;
2847              break;
2848       } 
2849    }
2850       break;
2851    default:
2852       break;
2853
2854    }    
2855    return 0;  
2856 }  
2857   
2858 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2859         void __user *optval, int optlen)  
2860 {  
2861         IF_EVENT(printk(">ia_getsockopt\n");)  
2862         return -EINVAL;  
2863 }  
2864   
2865 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2866         void __user *optval, unsigned int optlen)  
2867 {  
2868         IF_EVENT(printk(">ia_setsockopt\n");)  
2869         return -EINVAL;  
2870 }  
2871   
2872 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2873         IADEV *iadev;
2874         struct dle *wr_ptr;
2875         struct tx_buf_desc __iomem *buf_desc_ptr;
2876         int desc;
2877         int comp_code;
2878         int total_len;
2879         struct cpcs_trailer *trailer;
2880         struct ia_vcc *iavcc;
2881
2882         iadev = INPH_IA_DEV(vcc->dev);  
2883         iavcc = INPH_IA_VCC(vcc);
2884         if (!iavcc->txing) {
2885            printk("discard packet on closed VC\n");
2886            if (vcc->pop)
2887                 vcc->pop(vcc, skb);
2888            else
2889                 dev_kfree_skb_any(skb);
2890            return 0;
2891         }
2892
2893         if (skb->len > iadev->tx_buf_sz - 8) {
2894            printk("Transmit size over tx buffer size\n");
2895            if (vcc->pop)
2896                  vcc->pop(vcc, skb);
2897            else
2898                  dev_kfree_skb_any(skb);
2899           return 0;
2900         }
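        /* the payload is DMAed straight from skb->data, so buffers that
           are not 32-bit aligned are rejected here */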
2901         if ((unsigned long)skb->data & 3) {
2902            printk("Misaligned SKB\n");
2903            if (vcc->pop)
2904                  vcc->pop(vcc, skb);
2905            else
2906                  dev_kfree_skb_any(skb);
2907            return 0;
2908         }       
2909         /* Get a descriptor number from our free descriptor queue.
2910            The descriptor number is taken from the TCQ, which is used
2911            here as a free buffer queue: it starts out loaded with all
2912            the descriptors and is hence initially full.
2913         */
2914         desc = get_desc (iadev, iavcc);
2915         if (desc == 0xffff) 
2916             return 1;
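        /* the value from get_desc() carries a completion code in bits
           15:13 and the descriptor index in bits 12:0; split them apart */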
2917         comp_code = desc >> 13;  
2918         desc &= 0x1fff;  
2919   
2920         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2921         {  
2922                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2923                 atomic_inc(&vcc->stats->tx);
2924                 if (vcc->pop)   
2925                     vcc->pop(vcc, skb);   
2926                 else  
2927                     dev_kfree_skb_any(skb);
2928                 return 0;   /* return SUCCESS */
2929         }  
2930   
2931         if (comp_code)  
2932         {  
2933             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2934                                                             desc, comp_code);)  
2935         }  
2936        
2937         /* remember the desc and vcc mapping */
2938         iavcc->vc_desc_cnt++;
2939         iadev->desc_tbl[desc-1].iavcc = iavcc;
2940         iadev->desc_tbl[desc-1].txskb = skb;
2941         IA_SKB_STATE(skb) = 0;
2942
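        /* consume the TCQ entry just taken: advance the 16-bit read
           pointer, wrapping back to the start of the queue when it runs
           past the end, and publish the new position to the hardware */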
2943         iadev->ffL.tcq_rd += 2;
2944         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2945                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2946         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2947   
2948         /* Put the descriptor number in the packet ready queue  
2949                 and put the updated write pointer in the DLE field   
2950         */   
2951         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2952
2953         iadev->ffL.prq_wr += 2;
2954         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2955                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2956           
2957         /* Figure out the exact length of the packet and padding required to 
2958            make it  aligned on a 48 byte boundary.  */
2959         total_len = skb->len + sizeof(struct cpcs_trailer);  
2960         total_len = ((total_len + 47) / 48) * 48;
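        /* e.g. a 100 byte SDU plus the 8 byte CPCS trailer gives 108 bytes,
           which rounds up to 144 bytes (three 48 byte cell payloads) */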
2961         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2962  
2963         /* Put the packet in a tx buffer */   
2964         trailer = iadev->tx_buf[desc-1].cpcs;
2965         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2966                   skb, skb->data, skb->len, desc);)
2967         trailer->control = 0; 
2968         /*big endian*/ 
2969         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2970         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2971
2972         /* Display the packet */  
2973         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2974                                                         skb->len, tcnter++);  
2975         xdump(skb->data, skb->len, "TX: ");
2976         printk("\n");)
2977
2978         /* Build the buffer descriptor */  
2979         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2980         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2981         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2982         /* Oddly, p.115 of the user's guide describes this as a read-only register */
2983         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2984         buf_desc_ptr->vc_index = vcc->vci;
2985         buf_desc_ptr->bytes = total_len;  
2986
2987         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
2988            clear_lockup (vcc, iadev);
2989
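        /* Two DLEs are queued per packet: the first DMAs the payload from
           the skb, the second DMAs the 8 byte CPCS trailer and requests
           the completion interrupt (DMA_INT_ENABLE) */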
2990         /* Build the DLE structure */  
2991         wr_ptr = iadev->tx_dle_q.write;  
2992         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
2993         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2994                 skb->len, PCI_DMA_TODEVICE);
2995         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
2996                                                   buf_desc_ptr->buf_start_lo;  
2997         /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
2998         wr_ptr->bytes = skb->len;  
2999
3000         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3001         if ((wr_ptr->bytes >> 2) == 0xb)
3002            wr_ptr->bytes = 0x30;
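        /* (bytes >> 2) == 0xb matches DMA lengths 0x2c-0x2f, so any such
           transfer is padded up to 0x30 bytes to sidestep the lockup */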
3003
3004         wr_ptr->mode = TX_DLE_PSI; 
3005         wr_ptr->prq_wr_ptr_data = 0;
3006   
3007         /* end is not to be used for the DLE q */  
3008         if (++wr_ptr == iadev->tx_dle_q.end)  
3009                 wr_ptr = iadev->tx_dle_q.start;  
3010         
3011         /* Build trailer dle */
3012         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3013         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3014           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3015
3016         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3017         wr_ptr->mode = DMA_INT_ENABLE; 
3018         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3019         
3020         /* end is not to be used for the DLE q */
3021         if (++wr_ptr == iadev->tx_dle_q.end)  
3022                 wr_ptr = iadev->tx_dle_q.start;
3023
3024         iadev->tx_dle_q.write = wr_ptr;  
3025         ATM_DESC(skb) = vcc->vci;
3026         skb_queue_tail(&iadev->tx_dma_q, skb);
3027
3028         atomic_inc(&vcc->stats->tx);
3029         iadev->tx_pkt_cnt++;
3030         /* Increment transaction counter - two DLEs (payload and trailer) were queued above */  
3031         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3032         
3033 #if 0        
3034         /* add flow control logic */ 
3035         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3036           if (iavcc->vc_desc_cnt > 10) {
3037              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3038             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3039               iavcc->flow_inc = -1;
3040               iavcc->saved_tx_quota = vcc->tx_quota;
3041            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3042              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3043              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3044               iavcc->flow_inc = 0;
3045            }
3046         }
3047 #endif
3048         IF_TX(printk("ia send done\n");)  
3049         return 0;  
3050 }  
3051
3052 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3053 {
3054         IADEV *iadev; 
3055         struct ia_vcc *iavcc;
3056         unsigned long flags;
3057
3058         iadev = INPH_IA_DEV(vcc->dev);
3059         iavcc = INPH_IA_VCC(vcc); 
3060         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3061         {
3062             if (!skb)
3063                 printk(KERN_CRIT "null skb in ia_send\n");
3064             else dev_kfree_skb_any(skb);
3065             return -EINVAL;
3066         }                         
3067         spin_lock_irqsave(&iadev->tx_lock, flags); 
3068         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3069             dev_kfree_skb_any(skb);
3070             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3071             return -EINVAL; 
3072         }
3073         ATM_SKB(skb)->vcc = vcc;
3074  
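        /* preserve ordering: if a backlog already exists queue behind it,
           otherwise try to transmit now and fall back to the backlog when
           ia_pkt_tx() cannot get a free descriptor */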
3075         if (skb_peek(&iadev->tx_backlog)) {
3076            skb_queue_tail(&iadev->tx_backlog, skb);
3077         }
3078         else {
3079            if (ia_pkt_tx (vcc, skb)) {
3080               skb_queue_tail(&iadev->tx_backlog, skb);
3081            }
3082         }
3083         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3084         return 0;
3085
3086 }
3087
3088 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3089 {
3090   int   left = *pos, n;   
3091   char  *tmpPtr;
3092   IADEV *iadev = INPH_IA_DEV(dev);
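  /* *pos selects the chunk of proc output to emit: 0 gives the board type
     line, 1 gives the statistics block, anything higher ends the read */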
3093   if(!left--) {
3094      if (iadev->phy_type == FE_25MBIT_PHY) {
3095        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3096        return n;
3097      }
3098      if (iadev->phy_type == FE_DS3_PHY)
3099         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3100      else if (iadev->phy_type == FE_E3_PHY)
3101         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3102      else if (iadev->phy_type == FE_UTP_OPTION)
3103          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3104      else
3105         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3106      tmpPtr = page + n;
3107      if (iadev->pci_map_size == 0x40000)
3108         n += sprintf(tmpPtr, "-1KVC-");
3109      else
3110         n += sprintf(tmpPtr, "-4KVC-");  
3111      tmpPtr = page + n; 
3112      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3113         n += sprintf(tmpPtr, "1M  \n");
3114      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3115         n += sprintf(tmpPtr, "512K\n");
3116      else
3117        n += sprintf(tmpPtr, "128K\n");
3118      return n;
3119   }
3120   if (!left) {
3121      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3122                            "  Size of Tx Buffer  :  %u\n"
3123                            "  Number of Rx Buffer:  %u\n"
3124                            "  Size of Rx Buffer  :  %u\n"
3125                            "  Packets Receiverd  :  %u\n"
3126                            "  Packets Transmitted:  %u\n"
3127                            "  Cells Received     :  %u\n"
3128                            "  Cells Transmitted  :  %u\n"
3129                            "  Board Dropped Cells:  %u\n"
3130                            "  Board Dropped Pkts :  %u\n",
3131                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3132                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3133                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3134                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3135                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3136   }
3137   return 0;
3138 }
3139   
3140 static const struct atmdev_ops ops = {  
3141         .open           = ia_open,  
3142         .close          = ia_close,  
3143         .ioctl          = ia_ioctl,  
3144         .getsockopt     = ia_getsockopt,  
3145         .setsockopt     = ia_setsockopt,  
3146         .send           = ia_send,  
3147         .phy_put        = ia_phy_put,  
3148         .phy_get        = ia_phy_get,  
3149         .change_qos     = ia_change_qos,  
3150         .proc_read      = ia_proc_read,
3151         .owner          = THIS_MODULE,
3152 };  
3153           
3154 static int __devinit ia_init_one(struct pci_dev *pdev,
3155                                  const struct pci_device_id *ent)
3156 {  
3157         struct atm_dev *dev;  
3158         IADEV *iadev;  
3159         unsigned long flags;
3160         int ret;
3161
3162         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3163         if (!iadev) {
3164                 ret = -ENOMEM;
3165                 goto err_out;
3166         }
3167
3168         iadev->pci = pdev;
3169
3170         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3171                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3172         if (pci_enable_device(pdev)) {
3173                 ret = -ENODEV;
3174                 goto err_out_free_iadev;
3175         }
3176         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3177         if (!dev) {
3178                 ret = -ENOMEM;
3179                 goto err_out_disable_dev;
3180         }
3181         dev->dev_data = iadev;
3182         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3183         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3184                 iadev->LineRate);)
3185
3186         pci_set_drvdata(pdev, dev);
3187
3188         ia_dev[iadev_count] = iadev;
3189         _ia_dev[iadev_count] = dev;
3190         iadev_count++;
3191         spin_lock_init(&iadev->misc_lock);
3192         /* First fixes first. I don't want to think about this now. */
3193         spin_lock_irqsave(&iadev->misc_lock, flags); 
3194         if (ia_init(dev) || ia_start(dev)) {  
3195                 IF_INIT(printk("IA register failed!\n");)
3196                 iadev_count--;
3197                 ia_dev[iadev_count] = NULL;
3198                 _ia_dev[iadev_count] = NULL;
3199                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3200                 ret = -EINVAL;
3201                 goto err_out_deregister_dev;
3202         }
3203         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3204         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3205
3206         iadev->next_board = ia_boards;  
3207         ia_boards = dev;  
3208
3209         return 0;
3210
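/* error unwind, in reverse order of the setup above */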
3211 err_out_deregister_dev:
3212         atm_dev_deregister(dev);  
3213 err_out_disable_dev:
3214         pci_disable_device(pdev);
3215 err_out_free_iadev:
3216         kfree(iadev);
3217 err_out:
3218         return ret;
3219 }
3220
3221 static void __devexit ia_remove_one(struct pci_dev *pdev)
3222 {
3223         struct atm_dev *dev = pci_get_drvdata(pdev);
3224         IADEV *iadev = INPH_IA_DEV(dev);
3225
3226         /* Disable phy interrupts */
3227         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3228                                    SUNI_RSOP_CIE);
3229         udelay(1);
3230
3231         if (dev->phy && dev->phy->stop)
3232                 dev->phy->stop(dev);
3233
3234         /* De-register device */  
3235         free_irq(iadev->irq, dev);
3236         iadev_count--;
3237         ia_dev[iadev_count] = NULL;
3238         _ia_dev[iadev_count] = NULL;
3239         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3240         atm_dev_deregister(dev);
3241
3242         iounmap(iadev->base);  
3243         pci_disable_device(pdev);
3244
3245         ia_free_rx(iadev);
3246         ia_free_tx(iadev);
3247
3248         kfree(iadev);
3249 }
3250
3251 static struct pci_device_id ia_pci_tbl[] = {
3252         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3253         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3254         { 0,}
3255 };
3256 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3257
3258 static struct pci_driver ia_driver = {
3259         .name =         DEV_LABEL,
3260         .id_table =     ia_pci_tbl,
3261         .probe =        ia_init_one,
3262         .remove =       __devexit_p(ia_remove_one),
3263 };
3264
3265 static int __init ia_module_init(void)
3266 {
3267         int ret;
3268
3269         ret = pci_register_driver(&ia_driver);
3270         if (ret >= 0) {
3271                 ia_timer.expires = jiffies + 3*HZ;
3272                 add_timer(&ia_timer); 
3273         } else
3274                 printk(KERN_ERR DEV_LABEL ": driver registration failed\n");  
3275         return ret;
3276 }
3277
3278 static void __exit ia_module_exit(void)
3279 {
3280         pci_unregister_driver(&ia_driver);
3281
3282         del_timer(&ia_timer);
3283 }
3284
3285 module_init(ia_module_init);
3286 module_exit(ia_module_exit);