1 Index: arm/arm/elf_trampoline.c
2 ===================================================================
3 --- arm/arm/elf_trampoline.c    (revision 201358)
4 +++ arm/arm/elf_trampoline.c    (working copy)
5 @@ -57,6 +57,8 @@
6  #define cpu_idcache_wbinv_all  arm8_cache_purgeID
7  #elif defined(CPU_ARM9)
8  #define cpu_idcache_wbinv_all  arm9_idcache_wbinv_all
9 +#elif defined(CPU_FA526)
10 +#define cpu_idcache_wbinv_all  fa526_idcache_wbinv_all
11  #elif defined(CPU_ARM9E)
12  #define cpu_idcache_wbinv_all  armv5_ec_idcache_wbinv_all
13  #elif defined(CPU_ARM10)
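
elf_trampoline.c is built standalone and runs before set_cpufuncs(), so it binds the cache routine at compile time through this #elif chain instead of the cpufuncs table; with a kernel config that defines only CPU_FA526 the call reduces to a direct one. A minimal sketch of the pattern (flush_before_entry is a hypothetical helper):

    #if defined(CPU_FA526)
    #define cpu_idcache_wbinv_all   fa526_idcache_wbinv_all
    #endif

    static void
    flush_before_entry(void)
    {
            cpu_idcache_wbinv_all();        /* direct call, no table indirection */
    }
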
14 Index: arm/arm/cpufunc.c
15 ===================================================================
16 --- arm/arm/cpufunc.c   (revision 201358)
17 +++ arm/arm/cpufunc.c   (working copy)
18 @@ -781,6 +781,73 @@
19         xscale_setup                    /* cpu setup            */
20  };
21  #endif /* CPU_XSCALE_81342 */
22 +
23 +
24 +#if defined(CPU_FA526)
25 +struct cpu_functions fa526_cpufuncs = {
26 +       /* CPU functions */
27 +       
28 +       .cf_id                  = cpufunc_id,
29 +       .cf_cpwait              = cpufunc_nullop,
30 +
31 +       /* MMU functions */
32 +
33 +       .cf_control             = cpufunc_control,
34 +       .cf_domains             = cpufunc_domains,
35 +       .cf_setttb              = fa526_setttb,
36 +       .cf_faultstatus         = cpufunc_faultstatus,
37 +       .cf_faultaddress        = cpufunc_faultaddress,
38 +
39 +       /* TLB functions */
40 +
41 +       .cf_tlb_flushID         = armv4_tlb_flushID,
42 +       .cf_tlb_flushID_SE      = fa526_tlb_flushID_SE,
43 +       .cf_tlb_flushI          = armv4_tlb_flushI,
44 +       .cf_tlb_flushI_SE       = fa526_tlb_flushI_SE,
45 +       .cf_tlb_flushD          = armv4_tlb_flushD,
46 +       .cf_tlb_flushD_SE       = armv4_tlb_flushD_SE,
47 +
48 +       /* Cache operations */
49 +
50 +       .cf_icache_sync_all     = fa526_icache_sync_all,
51 +       .cf_icache_sync_range   = fa526_icache_sync_range,
52 +
53 +       .cf_dcache_wbinv_all    = fa526_dcache_wbinv_all,
54 +       .cf_dcache_wbinv_range  = fa526_dcache_wbinv_range,
55 +       .cf_dcache_inv_range    = fa526_dcache_inv_range,
56 +       .cf_dcache_wb_range     = fa526_dcache_wb_range,
57 +
58 +       .cf_idcache_wbinv_all   = fa526_idcache_wbinv_all,
59 +       .cf_idcache_wbinv_range = fa526_idcache_wbinv_range,
60 +
61 +
62 +       .cf_l2cache_wbinv_all = cpufunc_nullop,
63 +       .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
64 +       .cf_l2cache_inv_range = (void *)cpufunc_nullop,
65 +       .cf_l2cache_wb_range = (void *)cpufunc_nullop,
66 +
67 +
68 +       /* Other functions */
69 +
70 +       .cf_flush_prefetchbuf   = fa526_flush_prefetchbuf,
71 +       .cf_drain_writebuf      = armv4_drain_writebuf,
72 +       .cf_flush_brnchtgt_C    = cpufunc_nullop,
73 +       .cf_flush_brnchtgt_E    = fa526_flush_brnchtgt_E,
74 +
75 +       .cf_sleep               = fa526_cpu_sleep,
76 +
77 +       /* Soft functions */
78 +
79 +       .cf_dataabt_fixup       = cpufunc_null_fixup,
80 +       .cf_prefetchabt_fixup   = cpufunc_null_fixup,
81 +
82 +       .cf_context_switch      = fa526_context_switch,
83 +
84 +       .cf_setup               = fa526_setup
85 +};
86 +#endif /* CPU_FA526 */
87 +
88 +
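
For context, set_cpufuncs() (further down in this file) copies fa526_cpufuncs into the global cpufuncs table, and cpufunc.h wraps each slot in a macro so machine-independent code dispatches without knowing the CPU. A minimal sketch of the pattern (example_switch_ttb is hypothetical; the macro list in cpufunc.h is authoritative):

    extern struct cpu_functions cpufuncs;   /* set by set_cpufuncs() */

    #define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
    #define cpu_setttb(t)           cpufuncs.cf_setttb(t)

    static void
    example_switch_ttb(u_int new_ttb)
    {
            cpu_idcache_wbinv_all();        /* -> fa526_idcache_wbinv_all() */
            cpu_setttb(new_ttb);            /* -> fa526_setttb() */
    }
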
89  /*
90   * Global constants also used by locore.s
91   */
92 @@ -793,6 +860,7 @@
93    defined (CPU_ARM9E) || defined (CPU_ARM10) ||                               \
94    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||           \
95    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||         \
96 +  defined(CPU_FA526) ||                                               \
97    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
98  
99  static void get_cachetype_cp15(void);
100 @@ -1073,6 +1141,19 @@
101                 goto out;
102         }
103  #endif /* CPU_SA1110 */
104 +#ifdef CPU_FA526
105 +       if (cputype == CPU_ID_FA526) {
106 +               cpufuncs = fa526_cpufuncs;
107 +               cpu_reset_needs_v4_MMU_disable = 1;     /* like SA, FA526 needs it */
108 +               get_cachetype_cp15();
109 +               pmap_pte_init_generic();
110 +
111 +               /* Use powersave on this CPU. */
112 +               cpu_do_powersave = 1;
113 +
114 +               goto out;
115 +       }
116 +#endif /* CPU_FA526 */
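
For reference, cputype comes from cpufunc_id(), which is essentially a single CP15 read of the main ID register; CPU_ID_FA526 is the Faraday implementer/part value it is matched against. A hedged sketch (read_cpu_id is an illustrative stand-in):

    static inline u_int
    read_cpu_id(void)
    {
            u_int id;

            __asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (id));
            return (id);
    }
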
117  #ifdef CPU_IXP12X0
118          if (cputype == CPU_ID_IXP1200) {
119                  cpufuncs = ixp12x0_cpufuncs;
120 @@ -1547,7 +1628,8 @@
121    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||            \
122    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||          \
123    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
124 -  defined(CPU_ARM10) ||  defined(CPU_ARM11)
125 +  defined(CPU_ARM10) ||  defined(CPU_ARM11) || \
126 +  defined(CPU_FA526)
127  
128  #define IGN    0
129  #define OR     1
130 @@ -2013,6 +2095,60 @@
131  }
132  #endif /* CPU_SA1100 || CPU_SA1110 */
133  
134 +#if defined(CPU_FA526)
135 +struct cpu_option fa526_options[] = {
136 +#ifdef COMPAT_12
137 +       { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
138 +       { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
139 +#endif /* COMPAT_12 */
140 +       { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
141 +       { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
142 +       { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
143 +       { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
144 +       { NULL,                 IGN, IGN, 0 }
145 +};
146 +
147 +void
148 +fa526_setup(char *args)
149 +{
150 +       int cpuctrl, cpuctrlmask;
151 +
152 +       cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
153 +                | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
154 +                | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
155 +                | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
156 +       cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
157 +                | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
158 +                | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
159 +                | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
160 +                | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
161 +                | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
162 +                | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
163 +
164 +#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
165 +       cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
166 +#endif
167 +
168 +       cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
169 +
170 +#ifdef __ARMEB__
171 +       cpuctrl |= CPU_CONTROL_BEND_ENABLE;
172 +#endif
173 +
174 +       if (vector_page == ARM_VECTORS_HIGH)
175 +               cpuctrl |= CPU_CONTROL_VECRELOC;
176 +
177 +       /* Clear out the cache */
178 +       cpu_idcache_wbinv_all();
179 +
180 +       /* Set the control register */
181 +       /* (NetBSD stores this in curcpu()->ci_ctrl) */
182 +       ctrl = cpuctrl;
183 +       cpu_control(0xffffffff, cpuctrl);
184 +}
185 +#endif /* CPU_FA526 */
186 +
187 +
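
For context, parse_cpu_options() (earlier in cpufunc.c) treats each entry as { name, op-when-false, op-when-true, bits }, where the op is IGN (leave alone), OR (set bits) or BIC (clear bits). A hedged reading of two entries above:

    /*
     * { "cpu.nocache", OR, BIC, IC|DC }: cpu.nocache=1 clears the
     *     I$/D$ enable bits, cpu.nocache=0 sets them.
     * { "cpu.cache",   BIC, OR, IC|DC }: the mirror image.
     */
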
188  #if defined(CPU_IXP12X0)
189  struct cpu_option ixp12x0_options[] = {
190         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
191 Index: arm/arm/cpufunc_asm_fa526.S
192 ===================================================================
193 --- arm/arm/cpufunc_asm_fa526.S (revision 0)
194 +++ arm/arm/cpufunc_asm_fa526.S (revision 0)
195 @@ -0,0 +1,209 @@
196 +/*     $NetBSD: cpufunc_asm_fa526.S,v 1.3 2008/10/15 16:56:49 matt Exp $       */
197 +/*-
198 + * Copyright (c) 2008 The NetBSD Foundation, Inc.
199 + * All rights reserved.
200 + *
201 + * This code is derived from software contributed to The NetBSD Foundation
202 + * by Matt Thomas <matt@3am-software.com>
203 + *
204 + * Redistribution and use in source and binary forms, with or without
205 + * modification, are permitted provided that the following conditions
206 + * are met:
207 + * 1. Redistributions of source code must retain the above copyright
208 + *    notice, this list of conditions and the following disclaimer.
209 + * 2. Redistributions in binary form must reproduce the above copyright
210 + *    notice, this list of conditions and the following disclaimer in the
211 + *    documentation and/or other materials provided with the distribution.
212 + *
213 + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
214 + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
215 + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
216 + * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
217 + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
218 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
219 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
220 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
221 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
222 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
223 + * POSSIBILITY OF SUCH DAMAGE.
224 + */
225 +
226 +
227 +#include <machine/asm.h>
228 +
229 +#define        CACHELINE_SIZE  16
230 +
231 +ENTRY(fa526_setttb)
232 +       mov     r1, #0
233 +       mcr     p15, 0, r1, c7, c14, 0  /* clean and invalidate D$ */
234 +       mcr     p15, 0, r1, c7, c5, 0   /* invalidate I$ */
235 +       mcr     p15, 0, r1, c7, c5, 6   /* invalidate BTB */
236 +       mcr     p15, 0, r1, c7, c10, 4  /* drain write and fill buffer */
237 +
238 +       mcr     p15, 0, r0, c2, c0, 0   /* Write the TTB */ 
239 +
240 +       /* If we have updated the TTB we must flush the TLB */
241 +       mcr     p15, 0, r1, c8, c7, 0   /* invalidate I+D TLB */
242 +
243 +       /* Make sure that pipeline is emptied */
244 +       mov     r0, r0
245 +       mov     r0, r0
246 +       mov     pc, lr
247 +
248 +/*
249 + * TLB functions
250 + */
251 +ENTRY(fa526_tlb_flushID_SE)
252 +       mcr     p15, 0, r0, c8, c7, 1   /* flush Utlb single entry */
253 +       mov     pc, lr
254 +
255 +/*
256 + * TLB functions
257 + */
258 +ENTRY(fa526_tlb_flushI_SE)
259 +       mcr     p15, 0, r0, c8, c5, 1   /* flush Itlb single entry */
260 +       mov     pc, lr
261 +
262 +ENTRY(fa526_cpu_sleep)
263 +       mov     r0, #0
264 +       /*
265 +        * mcr p15, 0, r0, c7, c5, 5 (enter sleep mode, invalidate the
266 +        * scratchpad RAM?) is left disabled; IDLE mode is used instead. */
267 +       mcr     p15, 0, r0, c7, c0, 4           /* Wait for interrupt (IDLE mode)       */
268 +       mov     pc, lr
269 +
270 +ENTRY(fa526_flush_prefetchbuf)
271 +       mov     r0, #0
272 +       mcr     p15, 0, r0, c7, c5, 4   /* Pre-fetch flush */
273 +       mov     pc, lr
274 +
275 +/*
276 + * Cache functions
277 + */
278 +ENTRY(fa526_idcache_wbinv_all)
279 +       mov     r0, #0
280 +       mcr     p15, 0, r0, c7, c14, 0  /* clean and invalidate D$ */
281 +       mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ */
282 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
283 +       mov     pc, lr
284 +
285 +ENTRY(fa526_icache_sync_all)
286 +       mov     r0, #0
287 +       mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ */
288 +       mov     pc, lr
289 +
290 +ENTRY(fa526_dcache_wbinv_all)
291 +       mov     r0, #0
292 +       mcr     p15, 0, r0, c7, c14, 0  /* clean and invalidate D$ */
293 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
294 +       mov     pc, lr
295 +
296 +/*
297 + * Soft functions
298 + */
299 +ENTRY(fa526_dcache_wbinv_range)
300 +       cmp     r1, #0x4000
301 +       bhs     _C_LABEL(fa526_dcache_wbinv_all)
302 +
303 +       and     r2, r0, #(CACHELINE_SIZE-1)
304 +       add     r1, r1, r2
305 +       bic     r0, r0, #(CACHELINE_SIZE-1)
306 +
307 +1:     mcr     p15, 0, r0, c7, c14, 1  /* clean and invalidate D$ entry */
308 +       add     r0, r0, #CACHELINE_SIZE
309 +       subs    r1, r1, #CACHELINE_SIZE
310 +       bhi     1b
311 +
312 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
313 +       mov     pc, lr
314 +
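
A hedged C rendering of the range-op pattern shared by fa526_dcache_wbinv_range above and the routines that follow: the and/add/bic prologue aligns the start down to a cache line and stretches the length so the loop covers every line the range touches, and ranges of 0x4000 (16 KiB) or more fall back to the whole-cache operation. Helper names are hypothetical stand-ins for the mcr instructions:

    #define FA526_LINE      16                      /* CACHELINE_SIZE */

    void fa526_dcache_wbinv_all(void);              /* asm entry above */
    void clean_inv_dcache_line(uintptr_t);          /* mcr c7, c14, 1 */
    void drain_write_buffer(void);                  /* mcr c7, c10, 4 */

    static void
    dcache_wbinv_range_sketch(uintptr_t start, size_t len)
    {
            uintptr_t va, end;

            if (len >= 0x4000) {
                    fa526_dcache_wbinv_all();
                    return;
            }
            va = start & ~(uintptr_t)(FA526_LINE - 1);      /* align down */
            end = start + len;
            for (; va < end; va += FA526_LINE)
                    clean_inv_dcache_line(va);
            drain_write_buffer();
    }
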
315 +ENTRY(fa526_dcache_wb_range)
316 +       cmp     r1, #0x4000
317 +       bls     1f
318 +
319 +       mov     r0, #0
320 +       mcr     p15, 0, r0, c7, c10, 0  /* clean entire D$ */
321 +       b       3f
322 +
323 +1:     and     r2, r0, #(CACHELINE_SIZE-1)
324 +       add     r1, r1, r2
325 +       bic     r0, r0, #(CACHELINE_SIZE-1)
326 +
327 +2:     mcr     p15, 0, r0, c7, c10, 1  /* clean D$ entry */
328 +       add     r0, r0, #CACHELINE_SIZE
329 +       subs    r1, r1, #CACHELINE_SIZE
330 +       bhi     2b
331 +
332 +3:     mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
333 +       mov     pc, lr
334 +
335 +ENTRY(fa526_dcache_inv_range)
336 +       and     r2, r0, #(CACHELINE_SIZE-1)
337 +       add     r1, r1, r2
338 +       bic     r0, r0, #(CACHELINE_SIZE-1)
339 +
340 +1:     mcr     p15, 0, r0, c7, c6, 1   /* invalidate D$ single entry */
341 +       add     r0, r0, #CACHELINE_SIZE
342 +       subs    r1, r1, #CACHELINE_SIZE
343 +       bhi     1b
344 +
345 +       mov     pc, lr
346 +
347 +ENTRY(fa526_idcache_wbinv_range)
348 +       cmp     r1, #0x4000
349 +       bhs     _C_LABEL(fa526_idcache_wbinv_all)
350 +
351 +       and     r2, r0, #(CACHELINE_SIZE-1)
352 +       add     r1, r1, r2
353 +       bic     r0, r0, #(CACHELINE_SIZE-1)
354 +
355 +1:     mcr     p15, 0, r0, c7, c14, 1  /* clean and invalidate D$ entry */
356 +       mcr     p15, 0, r0, c7, c5, 1   /* invalidate I$ entry */
357 +       add     r0, r0, #CACHELINE_SIZE
358 +       subs    r1, r1, #CACHELINE_SIZE
359 +       bhi     1b
360 +
361 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
362 +       mov     pc, lr
363 +
364 +ENTRY(fa526_icache_sync_range)
365 +       cmp     r1, #0x4000
366 +       bhs     _C_LABEL(fa526_icache_sync_all)
367 +
368 +       and     r2, r0, #(CACHELINE_SIZE-1)
369 +       add     r1, r1, r2
370 +       bic     r0, r0, #(CACHELINE_SIZE-1)
371 +
372 +1:     mcr     p15, 0, r0, c7, c10, 1  /* clean D$ entry */
373 +       mcr     p15, 0, r0, c7, c5, 1   /* invalidate I$ entry */
374 +       add     r0, r0, #CACHELINE_SIZE
375 +       subs    r1, r1, #CACHELINE_SIZE
376 +       bhi     1b
377 +
378 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
379 +       mov     pc, lr
380 +
381 +ENTRY(fa526_flush_brnchtgt_E)
382 +       mov     r0, #0
383 +       mcr     p15, 0, r0, c7, c5, 6   /* invalidate BTB cache */
384 +       mov     pc, lr
385 +
386 +ENTRY(fa526_context_switch)
387 +       /*
388 +        * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
389 +        * Thus the data cache will contain only kernel data and the
390 +        * instruction cache will contain only kernel code, and all
391 +        * kernel mappings are shared by all processes.
392 +        */
393 +
394 +       mcr     p15, 0, r0, c2, c0, 0   /* Write the TTB */
395 +
396 +       /* If we have updated the TTB we must flush the TLB */
397 +       mov     r0, #0
398 +       mcr     p15, 0, r0, c8, c7, 0   /* flush the I+D tlb */
399 +
400 +       /* Make sure that pipeline is emptied */
401 +       mov     r0, r0
402 +       mov     r0, r0
403 +       mov     pc, lr
404 +
405 Index: arm/include/cpuconf.h
406 ===================================================================
407 --- arm/include/cpuconf.h       (revision 201358)
408 +++ arm/include/cpuconf.h       (working copy)
409 @@ -61,6 +61,7 @@
410                          defined(CPU_XSCALE_80200) +                    \
411                          defined(CPU_XSCALE_80321) +                    \
412                          defined(CPU_XSCALE_PXA2X0) +                   \
413 +                        defined(CPU_FA526) +                           \
414                          defined(CPU_XSCALE_IXP425))
415  
416  /*
417 @@ -68,7 +69,7 @@
418   */
419  #if (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||        \
420       defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
421 -    defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425))
422 +     defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425) || defined(CPU_FA526))
423  #define        ARM_ARCH_4      1
424  #else
425  #define        ARM_ARCH_4      0
426 @@ -125,7 +126,7 @@
427  
428  #if (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) ||        \
429       defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) ||   \
430 -     defined(CPU_ARM10) || defined(CPU_ARM11))
431 +     defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_FA526))
432  #define        ARM_MMU_GENERIC         1
433  #else
434  #define        ARM_MMU_GENERIC         0
435 Index: arm/include/cpufunc.h
436 ===================================================================
437 --- arm/include/cpufunc.h       (revision 201358)
438 +++ arm/include/cpufunc.h       (working copy)
439 @@ -283,6 +283,28 @@
440  u_int  arm8_clock_config       (u_int, u_int);
441  #endif
442  
443 +
444 +#ifdef CPU_FA526
445 +void   fa526_setup             (char *arg);
446 +void   fa526_setttb            (u_int ttb);
447 +void   fa526_context_switch    (void);
448 +void   fa526_cpu_sleep         (int);
449 +void   fa526_tlb_flushI_SE     (u_int);
450 +void   fa526_tlb_flushID_SE    (u_int);
451 +void   fa526_flush_prefetchbuf (void);
452 +void   fa526_flush_brnchtgt_E  (u_int);
453 +
454 +void   fa526_icache_sync_all   (void);
455 +void   fa526_icache_sync_range (vm_offset_t start, vm_size_t size);
456 +void   fa526_dcache_wbinv_all  (void);
457 +void   fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t size);
458 +void   fa526_dcache_inv_range  (vm_offset_t start, vm_size_t size);
459 +void   fa526_dcache_wb_range   (vm_offset_t start, vm_size_t size);
460 +void   fa526_idcache_wbinv_all (void);
461 +void   fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t size);
462 +#endif
463 +
464 +
465  #ifdef CPU_SA110
466  void   sa110_setup             (char *string);
467  void   sa110_context_switch    (void);
468 @@ -445,6 +467,7 @@
469  #if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
470    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
471    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||         \
472 +    defined(CPU_FA526) || \
473    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||       \
474    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
475    
476 Index: arm/econa/econa_var.h
477 ===================================================================
478 --- arm/econa/econa_var.h       (revision 0)
479 +++ arm/econa/econa_var.h       (revision 0)
480 @@ -0,0 +1,45 @@
481 +/*-
482 + * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>.
483 + * All rights reserved.
484 + *
485 + * Redistribution and use in source and binary forms, with or without
486 + * modification, are permitted provided that the following conditions
487 + * are met:
488 + * 1. Redistributions of source code must retain the above copyright
489 + *    notice, this list of conditions and the following disclaimer.
490 + * 2. Redistributions in binary form must reproduce the above copyright
491 + *    notice, this list of conditions and the following disclaimer in the
492 + *    documentation and/or other materials provided with the distribution.
493 + *
494 + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
495 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
496 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
497 + * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
498 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
499 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
500 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
501 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
502 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
503 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
504 + * SUCH DAMAGE.
505 + */
506 +
507 +#ifndef _ARM_ECONA_VAR_H
508 +#define _ARM_ECONA_VAR_H
509 +
510 +extern bus_space_tag_t obio_tag;
511 +
512 +struct econa_softc {
513 +       device_t dev;
514 +       bus_space_tag_t sc_st;
515 +       bus_space_handle_t sc_sh;
516 +       bus_space_handle_t sc_sys_sh;
517 +       struct rman sc_irq_rman;
518 +       struct rman sc_mem_rman;
519 +};
520 +
521 +struct econa_ivar {
522 +       struct resource_list resources;
523 +};
524 +
525 +#endif
526 Index: arm/econa/files.econa
527 ===================================================================
528 --- arm/econa/files.econa       (revision 0)
529 +++ arm/econa/files.econa       (revision 0)
530 @@ -0,0 +1,14 @@
531 +# $FreeBSD$
532 +arm/arm/cpufunc_asm_fa526.S    standard
533 +arm/econa/econa_machdep.c      standard
534 +arm/econa/econa.c              standard
535 +arm/econa/timer.c              standard
536 +arm/econa/uart_bus_ec.c        optional        uart
537 +arm/econa/uart_cpu_ec.c        optional        uart
538 +dev/uart/uart_dev_ns8250.c     optional        uart
539 +arm/arm/irq_dispatch.S         standard
540 +arm/arm/bus_space_generic.c    standard
541 +arm/econa/ehci_ebus.c          optional        ehci
542 +arm/econa/ohci_ec.c            optional        ohci
543 +arm/econa/if_ece.c             standard
544 +arm/econa/cfi_bus_econa.c      optional        cfi
545 Index: arm/econa/econa_machdep.c
546 ===================================================================
547 --- arm/econa/econa_machdep.c   (revision 0)
548 +++ arm/econa/econa_machdep.c   (revision 0)
549 @@ -0,0 +1,421 @@
550 +/*-
551 + * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
552 + * Copyright (c) 1994-1998 Mark Brinicombe.
553 + * Copyright (c) 1994 Brini.
554 + * All rights reserved.
555 + *
556 + * This code is derived from software written for Brini by Mark Brinicombe
557 + *
558 + * Redistribution and use in source and binary forms, with or without
559 + * modification, are permitted provided that the following conditions
560 + * are met:
561 + * 1. Redistributions of source code must retain the above copyright
562 + *    notice, this list of conditions and the following disclaimer.
563 + * 2. Redistributions in binary form must reproduce the above copyright
564 + *    notice, this list of conditions and the following disclaimer in the
565 + *    documentation and/or other materials provided with the distribution.
566 + * 3. All advertising materials mentioning features or use of this software
567 + *    must display the following acknowledgement:
568 + *      This product includes software developed by Brini.
569 + * 4. The name of the company nor the name of the author may be used to
570 + *    endorse or promote products derived from this software without specific
571 + *    prior written permission.
572 + *
573 + * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
574 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
575 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
576 + * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
577 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
578 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
579 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
580 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
581 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
582 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
583 + * SUCH DAMAGE.
584 + *
585 + */
586 +
587 +#include "opt_msgbuf.h"
588 +
589 +#include <sys/cdefs.h>
590 +__FBSDID("$FreeBSD$");
591 +
592 +#define _ARM32_BUS_DMA_PRIVATE
593 +#include <sys/param.h>
594 +#include <sys/systm.h>
595 +#include <sys/sysproto.h>
596 +#include <sys/signalvar.h>
597 +#include <sys/imgact.h>
598 +#include <sys/kernel.h>
599 +#include <sys/ktr.h>
600 +#include <sys/linker.h>
601 +#include <sys/lock.h>
602 +#include <sys/malloc.h>
603 +#include <sys/mutex.h>
604 +#include <sys/pcpu.h>
605 +#include <sys/proc.h>
606 +#include <sys/ptrace.h>
607 +#include <sys/cons.h>
608 +#include <sys/bio.h>
609 +#include <sys/bus.h>
610 +#include <sys/buf.h>
611 +#include <sys/exec.h>
612 +#include <sys/kdb.h>
613 +#include <sys/msgbuf.h>
614 +#include <machine/reg.h>
615 +#include <machine/cpu.h>
616 +
617 +#include <vm/vm.h>
618 +#include <vm/pmap.h>
619 +#include <vm/vm_object.h>
620 +#include <vm/vm_page.h>
621 +#include <vm/vm_pager.h>
622 +#include <vm/vm_map.h>
623 +#include <vm/vnode_pager.h>
624 +#include <machine/pmap.h>
625 +#include <machine/vmparam.h>
626 +#include <machine/pcb.h>
627 +#include <machine/undefined.h>
628 +#include <machine/machdep.h>
629 +#include <machine/metadata.h>
630 +#include <machine/armreg.h>
631 +#include <machine/bus.h>
632 +#include <sys/reboot.h>
633 +#include "econa_reg.h"
634 +
635 +#define KERNEL_PT_SYS          0       /* Page table for mapping proc0 zero page */
636 +#define KERNEL_PT_KERN         1
637 +#define KERNEL_PT_KERN_NUM     22
638 +/* L2 table for mapping after kernel */
639 +#define KERNEL_PT_AFKERNEL     (KERNEL_PT_KERN + KERNEL_PT_KERN_NUM)
640 +#define        KERNEL_PT_AFKERNEL_NUM  5
641 +
642 +/* this should be evenly divisible by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
643 +#define NUM_KERNEL_PTS         (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)
644 +
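
Worked out with the constants above (an ARM L2 table is L2_TABLE_SIZE_REAL = 1 KiB, so four fit in one 4 KiB page):

    /*
     * KERNEL_PT_AFKERNEL = 1 + 22 = 23
     * NUM_KERNEL_PTS     = 23 + 5 = 28, and 28 % 4 == 0 as required,
     * so the allocation loop below packs four L2 tables per page.
     */
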
645 +/* Define various stack sizes in pages */
646 +#define IRQ_STACK_SIZE 1
647 +#define ABT_STACK_SIZE 1
648 +#define UND_STACK_SIZE 1
649 +
650 +extern u_int data_abort_handler_address;
651 +extern u_int prefetch_abort_handler_address;
652 +extern u_int undefined_handler_address;
653 +
654 +struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
655 +
656 +extern void *_end;
657 +
658 +extern int *end;
659 +
660 +struct pcpu __pcpu;
661 +struct pcpu *pcpup = &__pcpu;
662 +
663 +/* Physical and virtual addresses for some global pages */
664 +
665 +vm_paddr_t phys_avail[10];
666 +vm_paddr_t dump_avail[4];
667 +vm_offset_t physical_pages;
668 +
669 +struct pv_addr systempage;
670 +struct pv_addr msgbufpv;
671 +struct pv_addr irqstack;
672 +struct pv_addr undstack;
673 +struct pv_addr abtstack;
674 +struct pv_addr kernelstack;
675 +
676 +static void *boot_arg1;
677 +static void *boot_arg2;
678 +
679 +static struct trapframe proc0_tf;
680 +
681 +/* Static device mappings. */
682 +static const struct pmap_devmap econa_devmap[] = {
683 +       {
684 +               /*
685 +                * This maps DDR SDRAM
686 +                */
687 +               ECONA_SDRAM_BASE, /*virtual*/
688 +               ECONA_SDRAM_BASE, /*physical*/
689 +               ECONA_SDRAM_SIZE, /*size*/
690 +               VM_PROT_READ|VM_PROT_WRITE,
691 +               PTE_NOCACHE,
692 +       },
693 +       /*
694 +        * Map the on-board devices VA == PA so that we can access them
695 +        * with the MMU on or off.
696 +        */
697 +       {
698 +               /*
699 +                * This maps the interrupt controller, the UART
700 +                * and the timer.
701 +                */
702 +               ECONA_IO_BASE, /*virtual*/
703 +               ECONA_IO_BASE, /*physical*/
704 +               ECONA_IO_SIZE, /*size*/
705 +               VM_PROT_READ|VM_PROT_WRITE,
706 +               PTE_NOCACHE,
707 +       },
708 +       {
709 +               /*
710 +                * OHCI + EHCI
711 +                */
712 +               ECONA_OHCI_VBASE, /*virtual*/
713 +               ECONA_OHCI_PBASE, /*physical*/
714 +               ECONA_USB_SIZE, /*size*/
715 +               VM_PROT_READ|VM_PROT_WRITE,
716 +               PTE_NOCACHE,
717 +       },
718 +       {
719 +               /*
720 +                * CFI
721 +                */
722 +               ECONA_CFI_VBASE, /*virtual*/
723 +               ECONA_CFI_PBASE, /*physical*/
724 +               ECONA_CFI_SIZE,
725 +               VM_PROT_READ|VM_PROT_WRITE,
726 +               PTE_NOCACHE,
727 +       },
728 +       {
729 +               0,
730 +               0,
731 +               0,
732 +               0,
733 +               0,
734 +       }
735 +};
736 +
737 +
738 +void *
739 +initarm(void *arg, void *arg2)
740 +{
741 +       struct pv_addr  kernel_l1pt;
742 +       int loop, i;
743 +       u_int l1pagetable;
744 +       vm_offset_t freemempos;
745 +       vm_offset_t afterkern;
746 +       uint32_t memsize;
747 +       vm_offset_t lastaddr;
748 +       volatile uint32_t *ddr = (volatile uint32_t *)0x4000000C;
749 +
750 +       boot_arg1 = arg;
751 +       boot_arg2 = arg2;
752 +       boothowto = RB_VERBOSE;
753 +       boothowto |=  RB_SINGLE;
754 +
755 +       set_cpufuncs();
756 +       lastaddr = fake_preload_metadata();
757 +       pcpu_init(pcpup, 0, sizeof(struct pcpu));
758 +       PCPU_SET(curthread, &thread0);
759 +
760 +
761 +       freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
762 +       /* Define a macro to simplify memory allocation */
763 +#define valloc_pages(var, np)                   \
764 +       alloc_pages((var).pv_va, (np));         \
765 +       (var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);
766 +
767 +#define alloc_pages(var, np)                   \
768 +       (var) = freemempos;             \
769 +       freemempos += (np * PAGE_SIZE);         \
770 +       memset((char *)(var), 0, ((np) * PAGE_SIZE));
771 +
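
A hedged illustration of the two macros above: the kernel runs at KERNVIRTADDR but was loaded at KERNPHYSADDR, so early bootstrap allocations derive the physical address with a constant offset (pt is a hypothetical pv_addr):

    struct pv_addr pt;

    alloc_pages(pt.pv_va, 1);       /* carve one zeroed page from freemempos */
    pt.pv_pa = pt.pv_va + (KERNPHYSADDR - KERNVIRTADDR);
    /* valloc_pages(pt, 1) expands to exactly these two steps. */
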
772 +       while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
773 +               freemempos += PAGE_SIZE;
774 +       valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
775 +       for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
776 +               if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
777 +                       valloc_pages(kernel_pt_table[loop],
778 +                           L2_TABLE_SIZE / PAGE_SIZE);
779 +               } else {
780 +                       kernel_pt_table[loop].pv_va = freemempos -
781 +                           (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
782 +                           L2_TABLE_SIZE_REAL;
783 +                       kernel_pt_table[loop].pv_pa =
784 +                           kernel_pt_table[loop].pv_va - KERNVIRTADDR +
785 +                           KERNPHYSADDR;
786 +               }
788 +       }
789 +
790 +
791 +       /*
792 +        * Allocate a page for the system page mapped to V0x00000000
793 +        * This page will just contain the system vectors and can be
794 +        * shared by all processes.
795 +        */
796 +       valloc_pages(systempage, 1);
797 +
798 +       /* Allocate stacks for all modes */
799 +       valloc_pages(irqstack, IRQ_STACK_SIZE);
800 +       valloc_pages(abtstack, ABT_STACK_SIZE);
801 +       valloc_pages(undstack, UND_STACK_SIZE);
802 +       valloc_pages(kernelstack, KSTACK_PAGES);
803 +       valloc_pages(msgbufpv, round_page(MSGBUF_SIZE) / PAGE_SIZE);
804 +
805 +       /*
806 +        * Now we start construction of the L1 page table
807 +        * We start by mapping the L2 page tables into the L1.
808 +        * This means that we can replace L1 mappings later on if necessary
809 +        */
810 +       l1pagetable = kernel_l1pt.pv_va;
811 +
812 +       /* Map the L2 pages tables in the L1 page table */
813 +       pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
814 +           &kernel_pt_table[KERNEL_PT_SYS]);
815 +       for (i = 0; i < KERNEL_PT_KERN_NUM; i++)
816 +               pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
817 +                   &kernel_pt_table[KERNEL_PT_KERN + i]);
818 +       pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
819 +          (((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
820 +           VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
821 +       afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1));
822 +       for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
823 +               pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
824 +                   &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
825 +       }
826 +
827 +       /* Map the vector page. */
828 +       pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
829 +           VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
830 +
831 +
832 +       /* Map the stack pages */
833 +       pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
834 +           IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
835 +       pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
836 +           ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
837 +       pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
838 +           UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
839 +       pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
840 +           KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
841 +
842 +       pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
843 +           L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
844 +       pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
845 +           MSGBUF_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
846 +
847 +       for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
848 +               pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
849 +                   kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
850 +                   VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
851 +       }
852 +
853 +
854 +       pmap_devmap_bootstrap(l1pagetable, econa_devmap);
855 +
856 +       cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
857 +
858 +       setttb(kernel_l1pt.pv_pa);
859 +
860 +       cpu_tlb_flushID();
861 +
862 +       cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
863 +
864 +       cninit();
865 +
866 +       memsize = 32*1024*1024;
867 +
868 +       switch (((*ddr) >> 4) & 0x3) {
869 +       case 0:
870 +               memsize = 8*1024*1024;
871 +               break;
872 +       case 1:
873 +               memsize = 16*1024*1024;
874 +               break;
875 +       case 2:
876 +               memsize = 32*1024*1024;
877 +               break;
878 +       case 3:
879 +               memsize = 64*1024*1024;
880 +               break;
881 +       }
882 +
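
For example, assuming the DDR controller register at 0x4000000C reads back with bits [5:4] equal to 2, the switch above gives:

    /* memsize = 32 * 1024 * 1024 = 33554432 bytes
     * physmem = 33554432 / 4096  = 8192 pages      */
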
883 +
884 +       physmem = memsize / PAGE_SIZE;
885 +
886 +       /*
887 +        * Pages were allocated during the secondary bootstrap for the
888 +        * stacks for different CPU modes.
889 +        * We must now set the r13 registers in the different CPU modes to
890 +        * point to these stacks.
891 +        * Since the ARM stacks use STMFD etc. we must set r13 to the top end
892 +        * of the stack memory.
893 +        */
894 +       cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
895 +
896 +       set_stackptr(PSR_IRQ32_MODE,
897 +           irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
898 +       set_stackptr(PSR_ABT32_MODE,
899 +           abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
900 +       set_stackptr(PSR_UND32_MODE,
901 +           undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
902 +
903 +       /*
904 +        * We must now clean the cache again....
905 +        * Cleaning may be done by reading new data to displace any
906 +        * dirty data in the cache. This will have happened in setttb()
907 +        * but since we are boot strapping the addresses used for the read
908 +        * but since we are bootstrapping, the addresses used for the read
909 +        * of sync. A re-clean after the switch will cure this.
910 +        * After booting there are no gross relocations of the kernel thus
911 +        * this problem will not occur after initarm().
912 +        */
913 +       cpu_idcache_wbinv_all();
914 +
915 +       /* Set stack for exception handlers */
916 +
917 +       data_abort_handler_address = (u_int)data_abort_handler;
918 +       prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
919 +       undefined_handler_address = (u_int)undefinedinstruction_bounce;
920 +       undefined_init();
921 +
922 +       proc_linkup0(&proc0, &thread0);
923 +       thread0.td_kstack = kernelstack.pv_va;
924 +       thread0.td_pcb = (struct pcb *)
925 +               (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
926 +       thread0.td_pcb->pcb_flags = 0;
927 +       thread0.td_frame = &proc0_tf;
928 +       pcpup->pc_curpcb = thread0.td_pcb;
929 +
930 +
931 +       arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
932 +
933 +       pmap_curmaxkvaddr = afterkern + L1_S_SIZE * (KERNEL_PT_KERN_NUM - 1);
934 +
935 +       /*
936 +        * ARM_USE_SMALL_ALLOC uses dump_avail, so it must be filled before
937 +        * calling pmap_bootstrap.
938 +        */
939 +       dump_avail[0] = PHYSADDR;
940 +       dump_avail[1] = PHYSADDR + memsize;
941 +       dump_avail[2] = 0;
942 +       dump_avail[3] = 0;
943 +
944 +       pmap_bootstrap(freemempos,
945 +           KERNVIRTADDR + 3 * memsize,
946 +           &kernel_l1pt);
947 +
948 +       msgbufp = (void*)msgbufpv.pv_va;
949 +       msgbufinit(msgbufp, MSGBUF_SIZE);
950 +
951 +       mutex_init();
952 +
953 +       i = 0;
954 +#if PHYSADDR != KERNPHYSADDR
955 +       phys_avail[i++] = PHYSADDR;
956 +       phys_avail[i++] = KERNPHYSADDR;
957 +#endif
958 +       phys_avail[i++] = virtual_avail - KERNVIRTADDR + KERNPHYSADDR;
959 +
960 +       phys_avail[i++] = PHYSADDR + memsize;
961 +       phys_avail[i++] = 0;
962 +       phys_avail[i++] = 0;
963 +       /* Do basic tuning, hz etc */
964 +       init_param1();
965 +       init_param2(physmem);
966 +       kdb_init();
967 +
968 +       return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
969 +           sizeof(struct pcb)));
970 +}
971 Index: arm/econa/if_ece.c
972 ===================================================================
973 --- arm/econa/if_ece.c  (revision 0)
974 +++ arm/econa/if_ece.c  (revision 0)
975 @@ -0,0 +1,1962 @@
976 +/*-
977 + * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
978 + * All rights reserved.
979 + *
980 + * Redistribution and use in source and binary forms, with or without
981 + * modification, are permitted provided that the following conditions
982 + * are met:
983 + * 1. Redistributions of source code must retain the above copyright
984 + *    notice, this list of conditions and the following disclaimer.
985 + * 2. Redistributions in binary form must reproduce the above copyright
986 + *    notice, this list of conditions and the following disclaimer in the
987 + *    documentation and/or other materials provided with the distribution.
988 + *
989 + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
990 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
991 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
992 + * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
993 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
994 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
995 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
996 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
997 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
998 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
999 + * SUCH DAMAGE.
1000 + */
1001 +
1002 +
1003 +#include <sys/cdefs.h>
1004 +__FBSDID("$FreeBSD$");
1005 +
1006 +#include <sys/param.h>
1007 +#include <sys/systm.h>
1008 +#include <sys/bus.h>
1009 +#include <sys/kernel.h>
1010 +#include <sys/mbuf.h>
1011 +#include <sys/malloc.h>
1012 +#include <sys/module.h>
1013 +#include <sys/rman.h>
1014 +#include <sys/socket.h>
1015 +#include <sys/sockio.h>
1016 +#include <sys/sysctl.h>
1017 +#include <machine/bus.h>
1018 +#include <sys/taskqueue.h>
1019 +
1020 +#include <net/ethernet.h>
1021 +#include <net/if.h>
1022 +#include <net/if_arp.h>
1023 +#include <net/if_dl.h>
1024 +#include <net/if_media.h>
1025 +#include <net/if_types.h>
1026 +#include <net/if_vlan_var.h>
1027 +
1028 +#ifdef INET
1029 +#include <netinet/in.h>
1030 +#include <netinet/in_systm.h>
1031 +#include <netinet/in_var.h>
1032 +#include <netinet/ip.h>
1033 +#endif
1034 +
1035 +#include <net/bpf.h>
1036 +#include <net/bpfdesc.h>
1037 +
1038 +#include <dev/mii/mii.h>
1039 +#include <dev/mii/miivar.h>
1040 +#include <arm/econa/if_ecereg.h>
1041 +#include <arm/econa/if_ecevar.h>
1042 +#include <machine/intr.h>
1043 +
1044 +#include "miibus_if.h"
1045 +
1046 +static uint8_t
1047 +vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19};
1048 +
1049 +/*
1050 + * Boot loader expects the hardware state to be the same when we
1051 + * restart the device (warm boot), so we need to save the initial
1052 + * config values.
1053 + */
1054 +int initial_switch_config;
1055 +int initial_cpu_config;
1056 +int initial_port0_config;
1057 +int initial_port1_config;
1058 +
1059 +static inline uint32_t
1060 +RD4(struct ece_softc *sc, bus_size_t off)
1061 +{
1062 +       return bus_read_4(sc->mem_res, off);
1063 +}
1064 +
1065 +static inline void
1066 +WR4(struct ece_softc *sc, bus_size_t off, uint32_t val)
1067 +{
1068 +       bus_write_4(sc->mem_res, off, val);
1069 +}
1070 +
1071 +#define ECE_LOCK(_sc)          mtx_lock(&(_sc)->sc_mtx)
1072 +#define        ECE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
1073 +#define ECE_LOCK_INIT(_sc) \
1074 +       mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
1075 +           MTX_NETWORK_LOCK, MTX_DEF)
1076 +
1077 +#define ECE_TXLOCK(_sc)                mtx_lock(&(_sc)->sc_mtx_tx)
1078 +#define        ECE_TXUNLOCK(_sc)               mtx_unlock(&(_sc)->sc_mtx_tx)
1079 +#define ECE_TXLOCK_INIT(_sc) \
1080 +       mtx_init(&_sc->sc_mtx_tx, device_get_nameunit(_sc->dev), \
1081 +           "ECE TX Lock", MTX_DEF)
1082 +
1083 +#define ECE_CLEANUPLOCK(_sc)           mtx_lock(&(_sc)->sc_mtx_cleanup)
1084 +#define        ECE_CLEANUPUNLOCK(_sc)          mtx_unlock(&(_sc)->sc_mtx_cleanup)
1085 +#define ECE_CLEANUPLOCK_INIT(_sc) \
1086 +       mtx_init(&_sc->sc_mtx_cleanup, device_get_nameunit(_sc->dev), \
1087 +           "ECE cleanup Lock", MTX_DEF)
1088 +
1089 +#define ECE_RXLOCK(_sc)                mtx_lock(&(_sc)->sc_mtx_rx)
1090 +#define        ECE_RXUNLOCK(_sc)               mtx_unlock(&(_sc)->sc_mtx_rx)
1091 +#define ECE_RXLOCK_INIT(_sc) \
1092 +       mtx_init(&_sc->sc_mtx_rx, device_get_nameunit(_sc->dev), \
1093 +           "ECE RX Lock", MTX_DEF)
1094 +
1095 +#define ECE_LOCK_DESTROY(_sc)  mtx_destroy(&_sc->sc_mtx);
1096 +#define ECE_TXLOCK_DESTROY(_sc)        mtx_destroy(&_sc->sc_mtx_tx);
1097 +#define ECE_RXLOCK_DESTROY(_sc)        mtx_destroy(&_sc->sc_mtx_rx);
1098 +#define ECE_CLEANUPLOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx_cleanup);
1099 +
1100 +#define ECE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
1101 +#define ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
1102 +
1103 +static devclass_t ece_devclass;
1104 +
1105 +/* ifnet entry points */
1106 +
1107 +static void eceinit_locked(void *);
1108 +static void ecestart_locked(struct ifnet *);
1109 +
1110 +static void eceinit(void *);
1111 +static void ecestart(struct ifnet *);
1112 +static void ecestop(struct ece_softc *);
1113 +static int eceioctl(struct ifnet * ifp, u_long, caddr_t);
1114 +
1115 +/* bus entry points */
1116 +
1117 +static int ece_probe(device_t dev);
1118 +static int ece_attach(device_t dev);
1119 +static int ece_detach(device_t dev);
1120 +static void ece_intr(void *);
1121 +static void ece_intr_qf(void *);
1122 +static void ece_intr_status(void *xsc);
1123 +
1124 +/* helper routines */
1125 +static int ece_activate(device_t dev);
1126 +static void ece_deactivate(device_t dev);
1127 +static int ece_ifmedia_upd(struct ifnet *ifp);
1128 +static void ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
1129 +static int ece_get_mac(struct ece_softc *sc, u_char *eaddr);
1130 +static void ece_set_mac(struct ece_softc *sc, u_char *eaddr);
1131 +static void poweron(struct ece_softc *sc);
1132 +static int configure_cpu_port(struct ece_softc *sc);
1133 +static int configure_lan_port(struct ece_softc *sc, int phy_type);
1134 +static void set_pvid(struct ece_softc *sc, int port0, int port1, int cpu);
1135 +static void set_vlan_vid(struct ece_softc *sc, int vlan);
1136 +static void set_vlan_member(struct ece_softc *sc, int vlan);
1137 +static void set_vlan_tag(struct ece_softc *sc, int vlan);
1138 +static int hardware_init(struct ece_softc *sc);
1139 +static void ece_intr_rx_locked(struct ece_softc *sc, int count);
1140 +
1141 +static void ece_free_desc_dma_tx(struct ece_softc *sc);
1142 +static void ece_free_desc_dma_rx(struct ece_softc *sc);
1143 +
1144 +static void ece_intr_task(void *arg, int pending __unused);
1145 +static void ece_tx_task(void *arg, int pending __unused);
1146 +static void ece_cleanup_task(void *arg, int pending __unused);
1147 +
1148 +static int ece_allocate_dma(struct ece_softc *sc);
1149 +
1150 +static void ece_intr_tx(void *xsc);
1151 +
1152 +static void clear_mac_entries(struct ece_softc *ec, int include_this_mac);
1153 +
1154 +static uint32_t read_mac_entry(struct ece_softc *ec,
1155 +                              uint8_t *mac_result,
1156 +                              int first);
1157 +
1158 +/*PHY related functions*/
1159 +
1160 +static inline int
1161 +phy_read(struct ece_softc *sc, int phy, int reg)
1162 +{
1163 +       int val;
1164 +       int ii;
1165 +       int status;
1166 +
1167 +       WR4(sc, PHY_CONTROL, 1 << 15);
1168 +       WR4(sc, PHY_CONTROL, ((phy & 0x1) |
1169 +                             ((reg & 0x1F) << 8) | (0x1 << 14)));
1170 +
1171 +       for (ii = 0; ii < 0x1000; ii++) {
1172 +               status = RD4(sc, PHY_CONTROL);
1173 +               if (status & (0x1 << 15)) {
1174 +                       /* clear the rw_ok status, and clear other bits value */
1175 +                       WR4(sc, PHY_CONTROL, (0x1 << 15));
1176 +                       val =  ((status >> 16) & 0xFFFF);
1177 +                       return val;
1178 +               }
1179 +       }
1180 +       return 0;
1181 +}
1182 +
1183 +static inline void
1184 +phy_write(struct ece_softc *sc, int phy, int reg, int data)
1185 +{
1186 +       int ii;
1187 +
1188 +       WR4(sc, PHY_CONTROL, 1 << 15);
1189 +       WR4(sc, PHY_CONTROL,
1190 +           ((phy & 0x1) |  ((reg & 0x1F) << 8) |
1191 +            (0x1 << 13) | ((data & 0xFFFF) << 16)));
1192 +       for (ii = 0; ii < 0x1000; ii++) {
1193 +               if (RD4(sc, PHY_CONTROL) & (0x1 << 15)) {
1194 +                       /* clear the rw_ok status, and clear other bits value */
1195 +                       WR4(sc, PHY_CONTROL, (0x1 << 15));
1196 +                       return;
1197 +               }
1198 +       }
1199 +}
1200 +
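
A hedged usage sketch of the two SMI helpers above (writing 1 to bit 15 of PHY_CONTROL clears the command-done flag, bit 14 requests a read, bit 13 a write; register 0x2 is the standard MII PHY identifier register, exactly as get_phy_type() uses it below):

    uint16_t id;

    id = phy_read(sc, 0, 0x2);      /* PHY 0's identifier register */
    phy_write(sc, 0, 0x0, 0x8000);  /* hypothetical: BMCR soft reset */
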
1201 +/* Currently only the IC Plus PHY is supported. */
1202 +static int get_phy_type(struct ece_softc *sc)
1203 +{
1204 +       uint16_t phy0_id = 0, phy1_id = 0;
1205 +
1206 +       /*
1207 +        * Use SMI (MDC/MDIO) to read Link Partner's PHY Identifier Register 1
1208 +        */
1209 +       phy0_id = phy_read(sc, 0, 0x2);
1210 +       phy1_id = phy_read(sc, 1, 0x2);
1211 +
1212 +       if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F)) {
1213 +               return ASIX_GIGA_PHY;
1214 +       } else if ((phy0_id == 0x0243) && (phy1_id == 0x0243)) {
1215 +               return TWO_SINGLE_PHY;
1216 +       } else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007)) {
1217 +               return VSC8601_GIGA_PHY;
1218 +       } else if ((phy1_id == 0xFFFF) && (phy0_id == 0x0243)) {
1219 +               return IC_PLUS_PHY;
1220 +       }
1221 +       return NOT_FOUND_PHY;
1222 +}
1223 +
1224 +static int
1225 +ece_probe(device_t dev)
1226 +{
1227 +       device_set_desc(dev, "Econa Ethernet Controller");
1228 +       return (0);
1229 +}
1230 +
1231 +/* Make sure that the interface is not powered off. */
1232 +static void
1233 +poweron(struct ece_softc *sc)
1234 +{
1235 +       int ii;
1236 +       uint32_t cfg_reg;
1237 +
1238 +       cfg_reg =  bus_read_4(sc->power_mem_res, POWER_CFG);
1239 +       cfg_reg |= 0x10;
1240 +       /* set reset bit to HIGH active; */
1241 +       bus_write_4(sc->power_mem_res, POWER_CFG, cfg_reg);
1242 +
1243 +       /*pulse delay */
1244 +       for (ii = 0; ii < 0xFFF; ii++)
1245 +               DELAY(100);
1246 +       /* set reset bit to LOW active; */
1247 +       cfg_reg = bus_read_4(sc->power_mem_res, POWER_CFG);
1248 +       cfg_reg &= ~0x10;
1249 +       bus_write_4(sc->power_mem_res, POWER_CFG, cfg_reg);
1250 +
1251 +       /*pulse delay */
1252 +       for (ii = 0; ii < 0xFFF; ii++)
1253 +               DELAY(100);
1254 +       cfg_reg = RD4(sc, POWER_CFG);
1255 +       cfg_reg |= 0x10;
1256 +       /* set reset bit to HIGH active; */
1257 +       bus_write_4(sc->power_mem_res, POWER_CFG, cfg_reg);
1258 +}
1259 +
1260 +static int
1261 +ece_attach(device_t dev)
1262 +{
1263 +       struct ece_softc *sc = device_get_softc(dev);
1264 +       struct ifnet *ifp = NULL;
1265 +       struct sysctl_ctx_list *sctx;
1266 +       struct sysctl_oid *soid;
1267 +       int err = 0;
1268 +       u_char eaddr[ETHER_ADDR_LEN];
1269 +       uint32_t rnd;
1270 +       int i, rid;
1271 +
1272 +
1273 +       sc->dev = dev;
1274 +
1275 +       rid = 0;
1276 +       sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1277 +           RF_ACTIVE);
1278 +
1279 +       if (sc->mem_res == NULL)
1280 +               goto out;
1281 +
1282 +       rid = 1;
1283 +       sc->power_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1284 +           RF_ACTIVE);
1285 +
1286 +       if (sc->power_mem_res == NULL) {
1287 +               goto out;
1288 +       }
1289 +
1290 +       poweron(sc);
1291 +
1292 +       rid = 0;
1293 +       sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1294 +           RF_ACTIVE);
1295 +       if (sc->irq_res_status == NULL)
1296 +               goto out;
1297 +
1298 +       rid = 1; /*TSTC: Fm-Switch-Tx-Complete*/
1299 +       sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1300 +           RF_ACTIVE);
1301 +       if (sc->irq_res_tx == NULL)
1302 +               goto out;
1303 +
1304 +       rid = 2; /*FSRC: Fm-Switch-Rx-Complete*/
1305 +       sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1306 +           RF_ACTIVE);
1307 +       if (sc->irq_res_rec == NULL)
1308 +               goto out;
1309 +
1310 +       rid = 4; /*FSQF: Fm-Switch-Queue-Full*/
1311 +       sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1312 +           RF_ACTIVE);
1313 +       if (sc->irq_res_qf == NULL)
1314 +               goto out;
1315 +
1316 +       err = ece_activate(dev);
1317 +       if (err)
1318 +               goto out;
1319 +
1320 +       /* Sysctls */
1321 +       sctx = device_get_sysctl_ctx(dev);
1322 +       soid = device_get_sysctl_tree(dev);
1323 +
1324 +       ECE_LOCK_INIT(sc);
1325 +
1326 +       callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1327 +
1328 +       if ((err = ece_get_mac(sc, eaddr)) != 0) {
1329 +               /*
1330 +                * No MAC address configured. Generate the random one.
1331 +                */
1332 +               if  (bootverbose)
1333 +                       device_printf(dev,
1334 +                           "Generating random ethernet address.\n");
1335 +               rnd = arc4random();
1336 +
1337 +               /*from if_ae.c/if_ate.c*/
1338 +               /*
1339 +                * Set OUI to convenient locally assigned address.  'b'
1340 +                * is 0x62, which has the locally assigned bit set, and
1341 +                * the broadcast/multicast bit clear.
1342 +                */
1343 +               eaddr[0] = 'b';
1344 +               eaddr[1] = 's';
1345 +               eaddr[2] = 'd';
1346 +               eaddr[3] = (rnd >> 16) & 0xff;
1347 +               eaddr[4] = (rnd >> 8) & 0xff;
1348 +               eaddr[5] = rnd & 0xff;
1349 +
1350 +               /* XXX bring-up override: use the fixed vlan0 MAC */
1351 +               for (i = 0; i < ETHER_ADDR_LEN; i++)
1352 +                       eaddr[i] = vlan0_mac[i];
1353 +
1354 +       }
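
Checking the bit math in the comment above (the bring-up override then replaces this address, as noted):

    /* 'b' == 0x62 == 0110 0010b: bit 0 (multicast) is clear and bit 1
     * (locally administered) is set, so 62:73:64:xx:xx:xx is a valid
     * locally assigned unicast address. */
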
1355 +       ece_set_mac(sc, eaddr);
1356 +       sc->ifp = ifp = if_alloc(IFT_ETHER);
1357 +       if (mii_phy_probe(dev, &sc->miibus, ece_ifmedia_upd, ece_ifmedia_sts)) {
1358 +               device_printf(dev, "Cannot find my PHY.\n");
1359 +               err = ENXIO;
1360 +               goto out;
1361 +       }
1362 +       ifp->if_softc = sc;
1363 +       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1364 +       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1365 +
1366 +       ifp->if_capabilities = IFCAP_HWCSUM;
1367 +
1368 +       ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
1369 +       ifp->if_capenable = ifp->if_capabilities;
1370 +       ifp->if_start = ecestart;
1371 +       ifp->if_ioctl = eceioctl;
1372 +       ifp->if_init = eceinit;
1373 +       ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS-1;
1374 +       IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS-1);
1375 +       IFQ_SET_READY(&ifp->if_snd);
1376 +
1377 +       /* Create local taskq. */
1378 +
1379 +       TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc);
1380 +       TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp);
1381 +       TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc);
1382 +       sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK,
1383 +           taskqueue_thread_enqueue, &sc->sc_tq);
1384 +       if (sc->sc_tq == NULL) {
1385 +               device_printf(sc->dev, "could not create taskqueue\n");
1386 +               goto out;
1387 +
1388 +       }
1389 +
1390 +       ether_ifattach(ifp, eaddr);
1391 +
1392 +       /*
1393 +        * Activate interrupts
1394 +        */
1395 +       err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE,
1396 +                            NULL, ece_intr, sc, &sc->intrhand);
1397 +       if (err) {
1398 +               ether_ifdetach(ifp);
1399 +               ECE_LOCK_DESTROY(sc);
1400 +               goto out;
1401 +       }
1402 +
1403 +       err = bus_setup_intr(dev, sc->irq_res_status, INTR_TYPE_NET | INTR_MPSAFE,
1404 +                            NULL, ece_intr_status, sc, &sc->intrhand_status);
1405 +       if (err) {
1406 +               ether_ifdetach(ifp);
1407 +               ECE_LOCK_DESTROY(sc);
1408 +               goto out;
1409 +       }
1410 +
1411 +       err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE,
1412 +           NULL,ece_intr_qf, sc, &sc->intrhand_qf);
1413 +
1414 +       if (err) {
1415 +               ether_ifdetach(ifp);
1416 +               ECE_LOCK_DESTROY(sc);
1417 +               goto out;
1418 +       }
1419 +
1420 +       err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE,
1421 +           NULL,ece_intr_tx, sc, &sc->intrhand_tx);
1422 +
1423 +       if (err) {
1424 +               ether_ifdetach(ifp);
1425 +               ECE_LOCK_DESTROY(sc);
1426 +               goto out;
1427 +       }
1428 +
1429 +       ECE_TXLOCK_INIT(sc);
1430 +       ECE_RXLOCK_INIT(sc);
1431 +       ECE_CLEANUPLOCK_INIT(sc);
1432 +
1433 +       /* Enable all interrupt sources. */
1434 +       WR4(sc, INTERRUPT_MASK, 0x00000000);
1435 +
1436 +       /* Enable port 0. */
1437 +       WR4(sc, PORT_0_CONFIG, RD4(sc, PORT_0_CONFIG) & ~((0x1 << 18)));
1438 +
1439 +       taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1440 +           device_get_nameunit(sc->dev));
1441 +
1442 +out:;
1443 +       if (err)
1444 +               ece_deactivate(dev);
1445 +       if (err && ifp)
1446 +               if_free(ifp);
1447 +       return (err);
1448 +}
1449 +
1450 +static int
1451 +ece_detach(device_t dev)
1452 +{
1453 +       /*TODO: release resources*/
1454 +
1455 +       struct ece_softc *sc = device_get_softc(dev);
1456 +       struct ifnet *ifp = sc->ifp;
1457 +
1458 +       ecestop(sc);
1459 +       if (ifp != NULL) {
1460 +               ether_ifdetach(ifp);
1461 +               if_free(ifp);
1462 +       }
1463 +       ece_deactivate(dev);
1464 +       return 0;
1465 +}
1466 +
1467 +static void
1468 +ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1469 +{
1470 +       u_int32_t *paddr;
1471 +       KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
1472 +       paddr = arg;
1473 +       *paddr = segs->ds_addr;
1474 +}
1475 +
1476 +static int
1477 +ece_alloc_desc_dma_tx(struct ece_softc *sc)
1478 +{
1479 +       int i;
1480 +       int error;
1481 +
1482 +       /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
1483 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1484 +           16, 0,                              /* alignment, boundary */
1485 +           BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
1486 +           BUS_SPACE_MAXADDR,                  /* highaddr */
1487 +           NULL, NULL,                         /* filtfunc, filtfuncarg */
1488 +           sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, 1,                /* maxsize, nsegments */
1489 +           sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, 0,                /* maxsegsz, flags */
1490 +           NULL, NULL,                         /* lockfunc, lockfuncarg */
1491 +           &sc->dmatag_data_tx);               /* dmat */
1492 +
1493 +       /* allocate memory for tx ring */
1494 +       error = bus_dmamem_alloc(sc->dmatag_data_tx,
1495 +                                (void**)&(sc->desc_tx),
1496 +                                BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1497 +                                &(sc->dmamap_ring_tx));
1498 +
1499 +       if (error) {
1500 +               if_printf(sc->ifp, "failed to allocate DMA memory\n");
1501 +               bus_dma_tag_destroy(sc->dmatag_data_tx);
1502 +               sc->dmatag_data_tx = 0;
1503 +               return (ENXIO);
1504 +       }
1505 +
1506 +       /* load ring dma */
1507 +       error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
1508 +                               sc->desc_tx, sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, ece_getaddr,
1509 +                               &(sc->ring_paddr_tx), BUS_DMA_NOWAIT);
1510 +
1511 +       if (error) {
1512 +               if_printf(sc->ifp, "can't load descriptor\n");
1513 +               bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
1514 +                               sc->dmamap_ring_tx);
1515 +               sc->desc_tx = NULL;
1516 +               bus_dma_tag_destroy(sc->dmatag_data_tx);
1517 +               sc->dmatag_data_tx = 0;
1518 +               return (ENXIO);
1519 +       }
1520 +
1521 +       /* Allocate a busdma tag for mbufs. Alignment is 2 bytes */
1522 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1523 +           1, 0,                               /* alignment, boundary */
1524 +           BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
1525 +           BUS_SPACE_MAXADDR,                  /* highaddr */
1526 +           NULL, NULL,                         /* filtfunc, filtfuncarg */
1527 +           MCLBYTES*MAX_FRAGMENT, MAX_FRAGMENT,                        /* maxsize, nsegments */
1528 +           MCLBYTES, 0,                        /* maxsegsz, flags */
1529 +           NULL, NULL,                         /* lockfunc, lockfuncarg */
1530 +           &sc->dmatag_ring_tx);                       /* dmat */
1531 +
1532 +       if (error) {
1533 +               if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
1534 +               return (ENXIO);
1535 +       }
1536 +
1537 +       for (i=0; i<ECE_MAX_TX_BUFFERS; i++) {
1538 +               /* create dma map for each descriptor */
1539 +               error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
1540 +                                         &(sc->tx_desc[i].dmamap));
1541 +               if (error) {
1542 +                       if_printf(sc->ifp, "failed to create map for mbuf\n");
1543 +                       return (ENXIO);
1544 +               }
1545 +       }
1546 +       return 0;
1547 +}
1548 +
1549 +static void
1550 +ece_free_desc_dma_tx(struct ece_softc *sc)
1551 +{
1552 +
1553 +       int i;
1554 +
1555 +       for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
1556 +               if (sc->tx_desc[i].buff) {
1557 +                       m_freem(sc->tx_desc[i].buff);
1558 +                       sc->tx_desc[i].buff= 0;
1559 +               }
1560 +       }
1561 +
1562 +       if (sc->dmamap_ring_tx) {
1563 +               bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx);
1564 +               if (sc->desc_tx) {
1565 +                       bus_dmamem_free(sc->dmatag_data_tx,
1566 +                                       sc->desc_tx, sc->dmamap_ring_tx);
1567 +               }
1568 +               sc->dmamap_ring_tx = 0;
1569 +       }
1570 +
1571 +       if (sc->dmatag_data_tx) {
1572 +               bus_dma_tag_destroy(sc->dmatag_data_tx);
1573 +               sc->dmatag_data_tx = 0;
1574 +       }
1575 +
1576 +       if (sc->dmatag_ring_tx) {
1577 +               for (i = 0; i<ECE_MAX_TX_BUFFERS; i++) {
1578 +                       bus_dmamap_destroy(sc->dmatag_ring_tx,
1579 +                                          sc->tx_desc[i].dmamap);
1580 +                       sc->tx_desc[i].dmamap = 0;
1581 +               }
1582 +               bus_dma_tag_destroy(sc->dmatag_ring_tx);
1583 +               sc->dmatag_ring_tx = 0;
1584 +       }
1585 +}
1586 +
1587 +static int
1588 +ece_alloc_desc_dma_rx(struct ece_softc *sc)
1589 +{
1590 +       int error;
1591 +
1592 +       /* Allocate a busdma tag and DMA safe memory for RX descriptors. */
1593 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1594 +           16, 0,                              /* alignment, boundary */
1595 +           BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
1596 +           BUS_SPACE_MAXADDR,                  /* highaddr */
1597 +           NULL, NULL,                         /* filtfunc, filtfuncarg */
1598 +           sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 1,                /* maxsize, nsegments */
1599 +           sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 0,                /* maxsegsz, flags */
1600 +           NULL, NULL,                         /* lockfunc, lockfuncarg */
1601 +           &sc->dmatag_data_rx);               /* dmat */
1602 +
1603 +       /*allocate ring*/
1604 +       error = bus_dmamem_alloc(sc->dmatag_data_rx,
1605 +                                (void**)&(sc->desc_rx),
1606 +                                BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1607 +                                &(sc->dmamap_ring_rx));
1608 +
1609 +       if (error) {
1610 +               if_printf(sc->ifp, "failed to allocate DMA memory\n");
1611 +               return (ENXIO);
1612 +       }
1613 +
1614 +       /* load dmamap */
1615 +       error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
1616 +                               sc->desc_rx,
1617 +                               sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS,
1618 +                               ece_getaddr,
1619 +                               &(sc->ring_paddr_rx), BUS_DMA_NOWAIT);
1620 +
1621 +       if (error) {
1622 +               if_printf(sc->ifp, "can't load descriptor\n");
1623 +               bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
1624 +                               sc->dmamap_ring_rx);
1625 +               bus_dma_tag_destroy(sc->dmatag_data_rx);
1626 +               sc->desc_rx = NULL;
1627 +               return (ENXIO);
1628 +       }
1629 +
1630 +       /* Allocate a busdma tag for mbufs. */
1631 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1632 +           16, 0,                              /* alignment, boundary */
1633 +           BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
1634 +           BUS_SPACE_MAXADDR,                  /* highaddr */
1635 +           NULL, NULL,                         /* filtfunc, filtfuncarg */
1636 +           MCLBYTES, 1,                        /* maxsize, nsegments */
1637 +           MCLBYTES, 0,                        /* maxsegsz, flags */
1638 +           NULL, NULL,                         /* lockfunc, lockfuncarg */
1639 +           &sc->dmatag_ring_rx);                       /* dmat */
1640 +
1641 +       if (error) {
1642 +               if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
1643 +               return (ENXIO);
1644 +       }
1645 +
1646 +       for (int i =0; i<ECE_MAX_RX_BUFFERS; i++) {
1647 +               error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_desc[i].dmamap);
1648 +               if (error) {
1649 +                       if_printf(sc->ifp, "failed to create map for mbuf\n");
1650 +                       return (ENXIO);
1651 +               }
1652 +       }
1653 +
1654 +       error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap);
1655 +       if (error) {
1656 +               if_printf(sc->ifp, "failed to create spare map\n");
1657 +               return (ENXIO);
1658 +       }
1659 +
1660 +       return (0);
1661 +}
1662 +
1663 +static void
1664 +ece_free_desc_dma_rx(struct ece_softc *sc)
1665 +{
1666 +       int i;
1667 +
1668 +       for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
1669 +               if (sc->rx_desc[i].buff) {
1670 +                       m_freem(sc->rx_desc[i].buff);
1671 +                       sc->rx_desc[i].buff= 0;
1672 +               }
1673 +       }
1674 +
1675 +       if (sc->dmatag_data_rx) {
1676 +               bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx);
1677 +               bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
1678 +                       sc->dmamap_ring_rx);
1679 +               bus_dma_tag_destroy(sc->dmatag_data_rx);
1680 +               sc->dmatag_data_rx = 0;
1681 +               sc->dmamap_ring_rx = 0;
1682 +               sc->desc_rx = 0;
1683 +       }
1684 +
1685 +       if (sc->dmatag_ring_rx) {
1686 +               for (i = 0; i<ECE_MAX_RX_BUFFERS; i++) {
1687 +                       bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_desc[i].dmamap);
1688 +               }
1689 +               bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap);
1690 +               bus_dma_tag_destroy(sc->dmatag_ring_rx);
1691 +               sc->dmatag_ring_rx = 0;
1692 +       }
1693 +}
1694 +
1695 +static int
1696 +ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info* descinfo)
1697 +{
1698 +       struct mbuf *new_mbuf;
1699 +       bus_dma_segment_t seg[1];
1700 +       bus_dmamap_t map;
1701 +       int error;
1702 +       int nsegs;
1703 +       bus_dma_tag_t tag;
1704 +
1705 +       tag = sc->dmatag_ring_rx;
1706 +
1707 +       new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1708 +
1709 +       if (new_mbuf == NULL)
1710 +               return (ENOBUFS);
1711 +
1712 +       new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES;
1713 +
1714 +       error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf, seg, &nsegs,
1715 +           BUS_DMA_NOWAIT);
1716 +
1717 +       KASSERT(nsegs == 1, ("Too many segments returned!"));
1718 +
1719 +       if (nsegs != 1 || error) {
1720 +               m_free(new_mbuf);
1721 +               return (ENOBUFS);
1722 +       }
1723 +
1724 +       if (descinfo->buff != NULL) {
1725 +               bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD);
1726 +               bus_dmamap_unload(tag, descinfo->dmamap);
1727 +       }
1728 +
1729 +       map = descinfo->dmamap;
1730 +       descinfo->dmamap = sc->rx_sparemap;
1731 +       sc->rx_sparemap = map;
1732 +
1733 +       bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD);
1734 +
1735 +       descinfo->buff = new_mbuf;
1736 +       descinfo->desc->data_ptr = seg->ds_addr;
1737 +       descinfo->desc->length = seg->ds_len-2;
1738 +
1739 +       return (0);
1740 +}
1741 +
1742 +static int
1743 +ece_allocate_dma(struct ece_softc *sc)
1744 +{
1745 +       eth_tx_desc_t *desctx;
1746 +       eth_rx_desc_t *descrx;
1747 +       int i;
1748 +       int error;
1749 +
1750 +       /*create parent tag for tx and rx*/
1751 +       error = bus_dma_tag_create(
1752 +           bus_get_dma_tag(sc->dev),   /* parent */
1753 +           1, 0,                               /* alignment, boundary */
1754 +           BUS_SPACE_MAXADDR,                  /* lowaddr */
1755 +           BUS_SPACE_MAXADDR,                  /* highaddr */
1756 +           NULL, NULL,                         /* filter, filterarg */
1757 +           BUS_SPACE_MAXSIZE_32BIT, 0,         /* maxsize, nsegments */
1758 +           BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
1759 +           0,                                  /* flags */
1760 +           NULL, NULL,                         /* lockfunc, lockarg */
1761 +           &sc->sc_parent_tag);
1762 +
1763 +       ece_alloc_desc_dma_tx(sc);
1764 +
1765 +       for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
1766 +               desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]);
1767 +               memset(desctx, 0, sizeof(eth_tx_desc_t));
1768 +               desctx->length = MAX_PACKET_LEN;
1769 +               desctx->cown = 1;
1770 +               if (i==ECE_MAX_TX_BUFFERS-1) {
1771 +                       desctx->eor = 1;
1772 +               }
1773 +       }
1774 +
1775 +       ece_alloc_desc_dma_rx(sc);
1776 +
1777 +       for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
1778 +               descrx = &(sc->desc_rx[i]);
1779 +               memset(descrx, 0, sizeof(eth_rx_desc_t));
1780 +               sc->rx_desc[i].desc = descrx;
1781 +               sc->rx_desc[i].buff = 0;
1782 +               ece_new_rxbuf(sc, &(sc->rx_desc[i]));
1783 +
1784 +               if (i==ECE_MAX_RX_BUFFERS-1) {
1785 +                       descrx->eor = 1;
1786 +               }
1787 +       }
1788 +       sc->tx_prod = 0;
1789 +       sc->tx_cons = 0;
1790 +       sc->last_rx = 0;
1791 +       sc->desc_curr_tx = 0;
1792 +
1793 +       return (0);
1794 +}
1795 +
1796 +static int
1797 +ece_activate(device_t dev)
1798 +{
1799 +       struct ece_softc *sc;
1800 +       int err;
1801 +       uint32_t mac_port_config;
1802 +       struct ifnet *ifp;
1803 +
1804 +       sc = device_get_softc(dev);
1805 +       ifp = sc->ifp;
1806 +
1807 +       initial_switch_config = RD4(sc, SWITCH_CONFIG);
1808 +       initial_cpu_config = RD4(sc, CPU_PORT_CONFIG);
1809 +       initial_port0_config = RD4(sc, MAC_PORT_0_CONFIG);
1810 +       initial_port1_config = RD4(sc, MAC_PORT_1_CONFIG);
1811 +
1812 +       /*Disable Port 0 */
1813 +       mac_port_config = RD4(sc, MAC_PORT_0_CONFIG);
1814 +       mac_port_config |= ((0x1 << 18));
1815 +       WR4(sc, MAC_PORT_0_CONFIG, mac_port_config);
1816 +
1817 +       /*Disable Port 1 */
1818 +       mac_port_config = RD4(sc, MAC_PORT_1_CONFIG);
1819 +       mac_port_config |= ((0x1 << 18));
1820 +       WR4(sc, MAC_PORT_1_CONFIG, mac_port_config);
1821 +
1822 +       err = ece_allocate_dma(sc);
1823 +       if (err) {
1824 +               if_printf(sc->ifp, "failed allocating dma\n");
1825 +               goto out;
1826 +       }
1827 +
1828 +       WR4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx);
1829 +       WR4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx);
1830 +
1831 +       WR4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx);
1832 +       WR4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx);
1833 +
1834 +       WR4(sc, FS_DMA_CONTROL, 1);
1835 +
1836 +       return (0);
1837 +out:
1838 +       return (ENXIO);
1839 +
1840 +}
1841 +
1842 +static void
1843 +ece_deactivate(device_t dev)
1844 +{
1845 +       struct ece_softc *sc;
1846 +
1847 +       sc = device_get_softc(dev);
1848 +
1849 +       if (sc->intrhand)
1850 +               bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand);
1851 +
1852 +       sc->intrhand = 0;
1853 +
1854 +       if (sc->intrhand_qf)
1855 +               bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf);
1856 +
1857 +       sc->intrhand_qf = 0;
1858 +
1859 +       bus_generic_detach(sc->dev);
1860 +       if (sc->miibus)
1861 +               device_delete_child(sc->dev, sc->miibus);
1862 +       if (sc->mem_res)
1863 +               bus_release_resource(dev, SYS_RES_IOPORT,
1864 +                   rman_get_rid(sc->mem_res), sc->mem_res);
1865 +       sc->mem_res = 0;
1866 +
1867 +       if (sc->power_mem_res)
1868 +               bus_release_resource(dev, SYS_RES_IOPORT,
1869 +                   rman_get_rid(sc->power_mem_res), sc->power_mem_res);
1870 +       sc->power_mem_res = 0;
1871 +
1872 +       if (sc->irq_res_rec)
1873 +               bus_release_resource(dev, SYS_RES_IRQ,
1874 +                   rman_get_rid(sc->irq_res_rec), sc->irq_res_rec);
1875 +
1876 +       if (sc->irq_res_qf)
1877 +               bus_release_resource(dev, SYS_RES_IRQ,
1878 +                   rman_get_rid(sc->irq_res_qf), sc->irq_res_qf);
1879 +
1880 +       if (sc->irq_res_qf)
1881 +               bus_release_resource(dev, SYS_RES_IRQ,
1882 +                   rman_get_rid(sc->irq_res_status), sc->irq_res_status);
1883 +
1884 +       sc->irq_res_rec = 0;
1885 +       sc->irq_res_qf = 0;
1886 +       sc->irq_res_status = 0;
1887 +       ECE_TXLOCK_DESTROY(sc);
1888 +       ECE_RXLOCK_DESTROY(sc);
1889 +
1890 +       ece_free_desc_dma_tx(sc);
1891 +       ece_free_desc_dma_rx(sc);
1892 +
1893 +       return;
1894 +}
1895 +
1896 +/*
1897 + * Change media according to request.
1898 + */
1899 +static int
1900 +ece_ifmedia_upd(struct ifnet *ifp)
1901 +{
1902 +       struct ece_softc *sc = ifp->if_softc;
1903 +       struct mii_data *mii;
1904 +       int     error;
1905 +
1906 +       mii = device_get_softc(sc->miibus);
1907 +       ECE_LOCK(sc);
1908 +       error = mii_mediachg(mii);
1909 +       ECE_UNLOCK(sc);
1910 +       return (error);
1911 +}
1912 +
1913 +/*
1914 + * Notify the world which media we're using.
1915 + */
1916 +static void
1917 +ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1918 +{
1919 +       struct ece_softc *sc = ifp->if_softc;
1920 +       struct mii_data *mii;
1921 +
1922 +       mii = device_get_softc(sc->miibus);
1923 +       ECE_LOCK(sc);
1924 +       mii_pollstat(mii);
1925 +       ifmr->ifm_active = mii->mii_media_active;
1926 +       ifmr->ifm_status = mii->mii_media_status;
1927 +       ECE_UNLOCK(sc);
1928 +}
1929 +
1930 +static void
1931 +ece_tick(void *xsc)
1932 +{
1933 +       struct ece_softc *sc = xsc;
1934 +       struct mii_data *mii;
1935 +       int active;
1936 +
1937 +       mii = device_get_softc(sc->miibus);
1938 +       active = mii->mii_media_active;
1939 +       mii_tick(mii);
1940 +
1941 +       /*
1942 +        * Schedule another timeout one second from now.
1943 +        */
1944 +       callout_reset(&sc->tick_ch, hz, ece_tick, sc);
1945 +}
1946 +
1947 +static uint32_t read_mac_entry(struct ece_softc *ec,
1948 +                              uint8_t *mac_result,
1949 +       int first)
1950 +{
1951 +       uint32_t  ii;
1952 +       struct arl_table_entry_t entry;
1953 +       uint32_t *entry_val;
1954 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
1955 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
1956 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
1957 +       if (first)
1958 +               WR4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1);
1959 +       else
1960 +               WR4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2);
1961 +
1962 +       for (ii = 0; ii < 0x1000; ii++) {
1963 +               if (RD4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
1964 +                       break;
1965 +       }
1966 +       entry_val = (uint32_t*) (&entry);
1967 +       entry_val[0] = RD4(ec, ARL_TABLE_ACCESS_CONTROL_1);
1968 +       entry_val[1] = RD4(ec, ARL_TABLE_ACCESS_CONTROL_2);
1969 +
1970 +       if (mac_result)
1971 +               memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN);
1972 +
1973 +       return entry.table_end;
1974 +}
1975 +
1976 +static uint32_t write_arl_table_entry(struct ece_softc *ec,
1977 +                                     uint32_t filter,
1978 +                                     uint32_t vlan_mac,
1979 +                                     uint32_t vlan_gid,
1980 +                                     uint32_t age_field,
1981 +                                     uint32_t port_map,
1982 +                                     const uint8_t *mac_addr)
1983 +{
1984 +       uint32_t  ii;
1985 +       uint32_t *entry_val;
1986 +       struct arl_table_entry_t entry;
1987 +
1988 +       memset(&entry, 0, sizeof(entry));
1989 +
1990 +       entry.filter = filter;
1991 +       entry.vlan_mac = vlan_mac;
1992 +       entry.vlan_gid = vlan_gid;
1993 +       entry.age_field = age_field;
1994 +       entry.port_map = port_map;
1995 +       memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN);
1996 +
1997 +       entry_val = (uint32_t*) (&entry);
1998 +
1999 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
2000 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
2001 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
2002 +
2003 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]);
2004 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]);
2005 +
2006 +       /* issue the write command */
2007 +       WR4(ec, ARL_TABLE_ACCESS_CONTROL_0, (0x1 << 3));
2008 +
2009 +       for (ii = 0; ii < 0x1000; ii++) {
2010 +               if (RD4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
2011 +                       return 1; /*write ok*/
2012 +       }
2013 +       /* write failed*/
2014 +       return 0;
2015 +}
2016 +
2017 +static void remove_mac_entry(struct ece_softc *sc,
2018 +                                uint8_t *mac)
2019 +{
2020 +       /* invalid age_field mean erase this entry*/
2021 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2022 +                             INVALID_ENTRY, VLAN0_GROUP,
2023 +                             mac);
2024 +}
2025 +
2026 +static void add_mac_entry(struct ece_softc *sc,
2027 +                         uint8_t *mac)
2028 +{
2029 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2030 +                             NEW_ENTRY, VLAN0_GROUP,
2031 +                             mac);
2032 +}
2033 +
2034 +/**
2035 + * The behavior of ARL table reading and deletion is not well defined
2036 + * in the documentation. To be safe, all mac addresses are put to a
2037 + * list, then deleted.
2038 + *
2039 + */
2040 +static void clear_mac_entries(struct ece_softc *ec, int include_this_mac)
2041 +{
2042 +       int table_end;
2043 +       struct mac_list * temp;
2044 +       struct mac_list * mac_list_header;
2045 +       struct mac_list * current;
2046 +       char mac[ETHER_ADDR_LEN];
2047 +
2048 +       current = 0;
2049 +       mac_list_header = 0;
2050 +
2051 +       table_end = read_mac_entry(ec, mac, 1);
2052 +       while (!table_end) {
2053 +               if (!include_this_mac &&
2054 +                   memcmp(mac, vlan0_mac, ETHER_ADDR_LEN)==0) {
2055 +                       /* read next entry */
2056 +                       table_end = read_mac_entry(ec, mac, 0);
2057 +                       continue;
2058 +               }
2059 +
2060 +               temp = (struct mac_list*)malloc(sizeof(struct mac_list),M_DEVBUF,
2061 +                                               M_NOWAIT | M_ZERO);
2062 +               memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN);
2063 +               temp->next = 0;
2064 +               if (mac_list_header) {
2065 +                       current->next = temp;
2066 +                       current = temp;
2067 +               } else {
2068 +                       mac_list_header = temp;
2069 +                       current = temp;
2070 +               }
2071 +               /*read next */
2072 +               table_end = read_mac_entry(ec, mac, 0);
2073 +       }
2074 +
2075 +       current = mac_list_header;
2076 +
2077 +       while (current) {
2078 +               remove_mac_entry(ec, current->mac_addr);
2079 +               temp = current;
2080 +               current = current->next;
2081 +               free(temp, M_DEVBUF);
2082 +       }
2083 +}
2084 +
2085 +static int configure_lan_port(struct ece_softc *sc, int phy_type)
2086 +{
2087 +       uint32_t sw_config;
2088 +       uint32_t mac_port_config;
2089 +
2090 +       /*
2091 +        * Configure
2092 +        */
2093 +       sw_config = RD4(sc, SWITCH_CONFIG);
2094 +       /* enable fast aging */
2095 +       sw_config |= FAST_AGING;
2096 +       /* Enable IVL learning */
2097 +       sw_config |= IVL_LEARNING;
2098 +       /* disable hardware NAT */
2099 +       sw_config &= ~(HARDWARE_NAT);
2100 +
2101 +       sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1| NIC_MODE;
2102 +
2103 +       WR4(sc, SWITCH_CONFIG, sw_config);
2104 +
2105 +       sw_config = RD4(sc, SWITCH_CONFIG);
2106 +
2107 +       mac_port_config = RD4(sc, MAC_PORT_0_CONFIG);
2108 +
2109 +       if (!(mac_port_config & 0x1) || (mac_port_config & 0x2))
2110 +               if_printf(sc->ifp, "STR9104: Link Down, 0x%08x!\n",
2111 +                      mac_port_config);
2112 +       else {
2113 +               WR4(sc, MAC_PORT_0_CONFIG, mac_port_config);
2114 +       }
2115 +       return 0;
2116 +}
2117 +
2118 +static void set_pvid(struct ece_softc *sc, int port0, int port1, int cpu)
2119 +{
2120 +       uint32_t val;
2121 +       val = RD4(sc, VLAN_PORT_PVID) & (~(0x7 << 0));
2122 +       WR4(sc, VLAN_PORT_PVID, val);
2123 +       val = RD4(sc, VLAN_PORT_PVID) | ((port0) & 0x07);
2124 +       WR4(sc, VLAN_PORT_PVID, val);
2125 +       val = RD4(sc, VLAN_PORT_PVID) & (~(0x7 << 4));
2126 +       WR4(sc, VLAN_PORT_PVID, val);
2127 +       val = RD4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4);
2128 +       WR4(sc, VLAN_PORT_PVID, val);
2129 +
2130 +       val = RD4(sc, VLAN_PORT_PVID) & (~(0x7 << 8));
2131 +       WR4(sc, VLAN_PORT_PVID, val);
2132 +       val = RD4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8);
2133 +       WR4(sc, VLAN_PORT_PVID, val);
2134 +
2135 +}
2136 +
2137 +/* VLAN related functions */
2138 +static void set_vlan_vid(struct ece_softc *sc, int vlan)
2139 +{
2140 +       const uint32_t regs[] = {
2141 +               VLAN_VID_0_1,
2142 +               VLAN_VID_0_1,
2143 +               VLAN_VID_2_3,
2144 +               VLAN_VID_2_3,
2145 +               VLAN_VID_4_5,
2146 +               VLAN_VID_4_5,
2147 +               VLAN_VID_6_7,
2148 +               VLAN_VID_6_7
2149 +       };
2150 +
2151 +       const int vids[] = {
2152 +               VLAN0_VID,
2153 +               VLAN1_VID,
2154 +               VLAN2_VID,
2155 +               VLAN3_VID,
2156 +               VLAN4_VID,
2157 +               VLAN5_VID,
2158 +               VLAN6_VID,
2159 +               VLAN7_VID
2160 +       };
2161 +
2162 +       uint32_t val;
2163 +       uint32_t reg;
2164 +       int vid;
2165 +
2166 +       reg = regs[vlan];
2167 +       vid = vids[vlan];
2168 +
2169 +       if (vlan & 1) {
2170 +               val = RD4(sc, reg);
2171 +               WR4(sc, reg, val & (~(0xFFF << 0)));
2172 +               val = RD4(sc, reg);
2173 +               WR4(sc, reg, val|((vid & 0xFFF) << 0));
2174 +       } else {
2175 +               val = RD4(sc, reg);
2176 +               WR4(sc, reg, val & (~(0xFFF << 12)));
2177 +               val = RD4(sc, reg);
2178 +               WR4(sc, reg, val|((vid & 0xFFF) << 12));
2179 +       }
2180 +}
2181 +
2182 +static void set_vlan_member(struct ece_softc *sc, int vlan)
2183 +{
2184 +       unsigned char shift;
2185 +       uint32_t val;
2186 +       int group;
2187 +       const int groups[] = {
2188 +               VLAN0_GROUP,
2189 +               VLAN1_GROUP,
2190 +               VLAN2_GROUP,
2191 +               VLAN3_GROUP,
2192 +               VLAN4_GROUP,
2193 +               VLAN5_GROUP,
2194 +               VLAN6_GROUP,
2195 +               VLAN7_GROUP
2196 +       };
2197 +
2198 +       group = groups[vlan];
2199 +
2200 +       shift = vlan*3;
2201 +       val = RD4(sc, VLAN_MEMBER_PORT_MAP) &  (~(0x7 << shift));
2202 +       WR4(sc, VLAN_MEMBER_PORT_MAP, val);
2203 +       val = RD4(sc, VLAN_MEMBER_PORT_MAP);
2204 +       WR4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift));
2205 +}
2206 +
2207 +static void set_vlan_tag(struct ece_softc *sc, int vlan)
2208 +{
2209 +       unsigned char shift;
2210 +       uint32_t val;
2211 +
2212 +       int tag = 0; /* VLAN0_VLAN_TAG .. VLAN7_VLAN_TAG*/
2213 +
2214 +       shift = vlan*3;
2215 +       val = RD4(sc, VLAN_TAG_PORT_MAP) &  (~(0x7 << shift));
2216 +       WR4(sc, VLAN_TAG_PORT_MAP, val);
2217 +       val = RD4(sc, VLAN_TAG_PORT_MAP);
2218 +       WR4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift));
2219 +}
2220 +
2221 +static int configure_cpu_port(struct ece_softc *sc)
2222 +{
2223 +       uint32_t cpu_port_config;
2224 +       int i;
2225 +
2226 +       cpu_port_config = RD4(sc, CPU_PORT_CONFIG);
2227 +       /*SA learning Disable */
2228 +       cpu_port_config |= (0x1 << 19);
2229 +       /*offset 4byte +2 */
2230 +       cpu_port_config &= ~(1 << 31);
2231 +
2232 +       WR4(sc, CPU_PORT_CONFIG, cpu_port_config);
2233 +
2234 +       if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2235 +                                  STATIC_ENTRY, VLAN0_GROUP,
2236 +                                  vlan0_mac)) {
2237 +               return 1;
2238 +       }
2239 +
2240 +       set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID);
2241 +
2242 +       for (i=0; i<8; i++) {
2243 +               set_vlan_vid(sc, i);
2244 +               set_vlan_member(sc, i);
2245 +               set_vlan_tag(sc, i);
2246 +       }
2247 +
2248 +       /* disable all interrupt status sources */
2249 +       WR4(sc, INTERRUPT_MASK, 0xffff1fff);
2250 +
2251 +       /* clear previous interrupt sources */
2252 +       WR4(sc, INTERRUPT_STATUS, 0x00001FFF);
2253 +
2254 +       WR4(sc, TS_DMA_CONTROL, 0);
2255 +       WR4(sc, FS_DMA_CONTROL, 0);
2256 +       return 0;
2257 +}
2258 +
2259 +static int hardware_init(struct ece_softc *sc)
2260 +{
2261 +       int status = 0;
2262 +       static int gw_phy_type;
2263 +
2264 +       gw_phy_type = get_phy_type(sc);
2265 +       if (gw_phy_type != IC_PLUS_PHY) {
2266 +               device_printf(sc->dev, "PHY type is not recognized (%d)\n",
2267 +                      gw_phy_type);
2268 +               return -1;
2269 +       }
2270 +       status = configure_lan_port(sc, gw_phy_type);
2271 +       configure_cpu_port(sc);
2272 +       return 0;
2273 +}
2274 +
2275 +static void set_mac_address(struct ece_softc *sc, const char *mac, int mac_len)
2276 +{
2277 +       /* invalid age_field mean erase this entry*/
2278 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2279 +                             INVALID_ENTRY, VLAN0_GROUP,
2280 +                             mac);
2281 +       memcpy(vlan0_mac, mac, ETHER_ADDR_LEN);
2282 +
2283 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2284 +                             STATIC_ENTRY, VLAN0_GROUP,
2285 +                             mac);
2286 +}
2287 +
2288 +static void
2289 +ece_set_mac(struct ece_softc *sc, u_char *eaddr)
2290 +{
2291 +       memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN);
2292 +       set_mac_address(sc, eaddr, ETHER_ADDR_LEN);
2293 +}
2294 +
2295 +/* TODO: the device doesn't have MAC,
2296 + * should read the configuration stored in FLASH
2297 + */
2298 +static int
2299 +ece_get_mac(struct ece_softc *sc, u_char *eaddr)
2300 +{
2301 +       return (ENXIO);
2302 +}
2303 +
2304 +/*version for one segment only*/
2305 +static void
2306 +ece_intr_rx_locked(struct ece_softc *sc, int count)
2307 +{
2308 +       struct ifnet *ifp = sc->ifp;
2309 +       struct rx_desc_info *rxdesc;
2310 +
2311 +       uint32_t status;
2312 +
2313 +       int fssd_curr;
2314 +       int fssd;
2315 +       int rxcount;
2316 +       int i;
2317 +       int idx;
2318 +       struct mbuf *mb;
2319 +       eth_rx_desc_t *desc;
2320 +
2321 +       fssd_curr = RD4(sc, FS_DESCRIPTOR_POINTER);
2322 +
2323 +       fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx)>>4;
2324 +
2325 +       desc = sc->rx_desc[sc->last_rx].desc;
2326 +
2327 +       /*prepare to read the data in the ring*/
2328 +       bus_dmamap_sync(sc->dmatag_ring_rx,
2329 +           sc->dmamap_ring_rx,
2330 +           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2331 +
2332 +       if (fssd > sc->last_rx) {
2333 +               rxcount = fssd - sc->last_rx;
2334 +       } else if (fssd < sc->last_rx) {
2335 +               rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd;
2336 +       } else {
2337 +
2338 +               if (desc->cown == 0) {
2339 +                       return;
2340 +               } else {
2341 +                       rxcount = ECE_MAX_RX_BUFFERS;
2342 +               }
2343 +       }
2344 +
2345 +       for (i= 0; i<rxcount; i++) {
2346 +               /* Get status */
2347 +               status = desc->cown;
2348 +               if (!status) {
2349 +                       break;
2350 +               }
2351 +
2352 +               idx = sc->last_rx;
2353 +               rxdesc = &sc->rx_desc[idx];
2354 +               mb = rxdesc->buff;
2355 +
2356 +               if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN ||
2357 +                   desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN +
2358 +                   ETHER_VLAN_ENCAP_LEN) {
2359 +                       ifp->if_ierrors++;
2360 +                       desc->cown = 0;
2361 +                       desc->length = MCLBYTES - 2;
2362 +                       /*process next packet*/
2363 +                       continue;
2364 +               }
2365 +
2366 +               if (ece_new_rxbuf(sc, rxdesc)!=0) {
2367 +                       ifp->if_iqdrops++;
2368 +                       desc->cown = 0;
2369 +                       desc->length = MCLBYTES - 2;
2370 +                       break;
2371 +               }
2372 +
2373 +               /**
2374 +                * the device will write to X+2 So we need to adjust
2375 +                * this after the packet is received.
2376 +                */
2377 +
2378 +               mb->m_data += 2;
2379 +               mb->m_len = mb->m_pkthdr.len = desc->length;
2380 +
2381 +               mb->m_flags |= M_PKTHDR;
2382 +               mb->m_pkthdr.rcvif = ifp;
2383 +               if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2384 +                       /*check for valid checksum*/
2385 +                       if ( (!desc->l4f)  && (desc->prot!=3)) {
2386 +                               mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2387 +                               mb->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2388 +                               mb->m_pkthdr.csum_data = 0xffff;
2389 +                       }
2390 +               }
2391 +               ECE_RXUNLOCK(sc);
2392 +               (*ifp->if_input)(ifp, mb);
2393 +               ECE_RXLOCK(sc);
2394 +
2395 +               desc->cown = 0;
2396 +               desc->length = MCLBYTES-2;
2397 +
2398 +               bus_dmamap_sync(sc->dmatag_ring_rx,
2399 +                               sc->dmamap_ring_rx,
2400 +                               BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2401 +
2402 +               if (sc->last_rx==ECE_MAX_RX_BUFFERS-1) {
2403 +                       sc->last_rx = 0;
2404 +               } else {
2405 +                       sc->last_rx++;
2406 +               }
2407 +               desc = sc->rx_desc[sc->last_rx].desc;
2408 +       }
2409 +
2410 +       /* sync updated flags */
2411 +       bus_dmamap_sync(sc->dmatag_ring_rx,
2412 +           sc->dmamap_ring_rx,
2413 +           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2414 +
2415 +       return;
2416 +}
2417 +
2418 +static void
2419 +ece_intr_task(void *arg, int pending __unused)
2420 +{
2421 +       struct ece_softc *sc = arg;
2422 +       ECE_RXLOCK(sc);
2423 +       ece_intr_rx_locked(sc, -1);
2424 +       ECE_RXUNLOCK(sc);
2425 +}
2426 +
2427 +static void
2428 +ece_intr(void *xsc)
2429 +{
2430 +       struct ece_softc *sc = xsc;
2431 +       struct ifnet *ifp = sc->ifp;
2432 +
2433 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2434 +               WR4(sc, FS_DMA_CONTROL, 0);
2435 +               return;
2436 +       }
2437 +
2438 +       taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
2439 +
2440 +       if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2441 +               taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
2442 +}
2443 +
2444 +static void
2445 +ece_intr_status(void *xsc)
2446 +{
2447 +       struct ece_softc *sc = xsc;
2448 +       struct ifnet *ifp = sc->ifp;
2449 +       int stat;
2450 +
2451 +       stat = RD4(sc, INTERRUPT_STATUS);
2452 +
2453 +       WR4(sc, INTERRUPT_STATUS, stat);
2454 +
2455 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2456 +               if ((stat & ERROR_MASK)!=0)
2457 +                       ifp->if_iqdrops++;
2458 +       }
2459 +}
2460 +
2461 +static void
2462 +ece_cleanup_locked(struct ece_softc *sc)
2463 +{
2464 +       eth_tx_desc_t *desc;
2465 +
2466 +       if (sc->tx_cons==sc->tx_prod) return;
2467 +
2468 +       /*prepare to read the ring (owner bit)*/
2469 +       bus_dmamap_sync(sc->dmatag_ring_tx,
2470 +           sc->dmamap_ring_tx,
2471 +           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2472 +
2473 +       while (sc->tx_cons!=sc->tx_prod) {
2474 +               desc = sc->tx_desc[sc->tx_cons].desc;
2475 +               if (desc->cown != 0) {
2476 +                       struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]);
2477 +                       /*we are finished with this descriptor*/
2478 +                       /*sync*/
2479 +                       bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap,
2480 +                                       BUS_DMASYNC_POSTWRITE);
2481 +                       /*and unload, so we can reuse */
2482 +                       bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
2483 +                       m_freem(td->buff);
2484 +                       td->buff = 0;
2485 +                       sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS;
2486 +               } else {
2487 +                       break;
2488 +               }
2489 +       }
2490 +
2491 +}
2492 +
2493 +static void
2494 +ece_cleanup_task(void *arg, int pending __unused)
2495 +{
2496 +       struct ece_softc *sc = arg;
2497 +       ECE_CLEANUPLOCK(sc);
2498 +       ece_cleanup_locked(sc);
2499 +       ECE_CLEANUPUNLOCK(sc);
2500 +}
2501 +
2502 +static void
2503 +ece_intr_tx(void *xsc)
2504 +{
2505 +       struct ece_softc *sc = xsc;
2506 +       struct ifnet *ifp = sc->ifp;
2507 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2508 +               /*this should not happen, stop DMA*/
2509 +               WR4(sc, FS_DMA_CONTROL, 0);
2510 +               return;
2511 +       }
2512 +       taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task);
2513 +}
2514 +
2515 +static void
2516 +ece_intr_qf(void *xsc)
2517 +{
2518 +       struct ece_softc *sc = xsc;
2519 +       struct ifnet *ifp = sc->ifp;
2520 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2521 +               /*this should not happen, stop DMA*/
2522 +               WR4(sc, FS_DMA_CONTROL, 0);
2523 +               return;
2524 +       }
2525 +       taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
2526 +       WR4(sc, FS_DMA_CONTROL, 1);
2527 +}
2528 +
2529 +/*
2530 + * Reset and initialize the chip
2531 + */
2532 +static void
2533 +eceinit_locked(void *xsc)
2534 +{
2535 +       struct ece_softc *sc = xsc;
2536 +       struct ifnet *ifp = sc->ifp;
2537 +       struct mii_data *mii;
2538 +       uint32_t cfg_reg;
2539 +       uint32_t cpu_port_config;
2540 +       uint32_t mac_port_config;
2541 +
2542 +       while (1) {
2543 +               cfg_reg = RD4(sc, BIST_RESULT_TEST_0);
2544 +               if ((cfg_reg & (1<<17)))
2545 +                       break;
2546 +               DELAY(100);
2547 +       }
2548 +       /* set to default values */
2549 +       WR4(sc, SWITCH_CONFIG, 0x007AA7A1);
2550 +       WR4(sc, MAC_PORT_0_CONFIG, 0x00423D00);
2551 +       WR4(sc, MAC_PORT_1_CONFIG, 0x00423D80);
2552 +       WR4(sc, CPU_PORT_CONFIG, 0x004C0000);
2553 +
2554 +       hardware_init(sc);
2555 +
2556 +       mac_port_config = RD4(sc, MAC_PORT_0_CONFIG);
2557 +       mac_port_config &= (~(0x1 << 18));      /* Enable Port 0 */
2558 +       WR4(sc, MAC_PORT_0_CONFIG, mac_port_config);
2559 +
2560 +       cpu_port_config = RD4(sc, CPU_PORT_CONFIG);
2561 +       /* enable CPU */
2562 +       cpu_port_config &= ~(0x1 << 18);
2563 +       WR4(sc, CPU_PORT_CONFIG, cpu_port_config);
2564 +
2565 +       /*
2566 +        * Set 'running' flag, and clear output active flag
2567 +        * and attempt to start the output
2568 +        */
2569 +       ifp->if_drv_flags |= IFF_DRV_RUNNING;
2570 +       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2571 +
2572 +       mii = device_get_softc(sc->miibus);
2573 +       mii_pollstat(mii);
2574 +       /* enable dma */
2575 +       WR4(sc, FS_DMA_CONTROL, 1);
2576 +
2577 +       callout_reset(&sc->tick_ch, hz, ece_tick, sc);
2578 +}
2579 +
2580 +static inline int
2581 +ece_encap(struct ece_softc *sc, struct mbuf *m0)
2582 +{
2583 +       struct ifnet *ifp;
2584 +       bus_dma_segment_t segs[MAX_FRAGMENT];
2585 +       bus_dmamap_t mapp;
2586 +       int error;
2587 +       int seg;
2588 +       int nsegs;
2589 +       int desc_no;
2590 +       eth_tx_desc_t *desc = 0;
2591 +
2592 +       int csum_flags;
2593 +
2594 +       ifp = sc->ifp;
2595 +
2596 +       /* Fetch unused map */
2597 +       mapp = sc->tx_desc[sc->tx_prod].dmamap;
2598 +
2599 +       error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp,
2600 +                                       m0, segs, &nsegs,
2601 +           BUS_DMA_NOWAIT);
2602 +
2603 +       if (error != 0) {
2604 +               bus_dmamap_unload(sc->dmatag_ring_tx, mapp);
2605 +               return ((error != 0) ? error : -1);
2606 +       }
2607 +
2608 +       desc = &(sc->desc_tx[sc->desc_curr_tx]);
2609 +       sc->tx_desc[sc->tx_prod].desc = desc;
2610 +       sc->tx_desc[sc->tx_prod].buff = m0;
2611 +       desc_no = sc->desc_curr_tx;
2612 +
2613 +       for (seg = 0; seg < nsegs; seg++) {
2614 +
2615 +               if (desc->cown == 0 ) {
2616 +                       if_printf(ifp, "ERROR: descriptor is still used\n");
2617 +               }
2618 +
2619 +               desc->length = segs[seg].ds_len;
2620 +               desc->data_ptr = segs[seg].ds_addr;
2621 +
2622 +               if (seg == 0) {
2623 +                       desc->fs = 1;
2624 +               } else {
2625 +                       desc->fs = 0;
2626 +               }
2627 +               if (seg==nsegs-1) {
2628 +                       desc->ls = 1;
2629 +               } else {
2630 +                       desc->ls = 0;
2631 +               }
2632 +
2633 +               csum_flags = m0->m_pkthdr.csum_flags;
2634 +
2635 +               desc->fr =  1;
2636 +               desc->pmap =  1;
2637 +               desc->insv =  0;
2638 +               desc->ico = 0;
2639 +               desc->tco = 0;
2640 +               desc->uco = 0;
2641 +               desc->interrupt = 1;
2642 +
2643 +               if (csum_flags & CSUM_IP) {
2644 +                       desc->ico = 1;
2645 +                       if (csum_flags & CSUM_TCP)
2646 +                               desc->tco = 1;
2647 +                       if (csum_flags & CSUM_UDP)
2648 +                               desc->uco = 1;
2649 +               }
2650 +
2651 +               desc++;
2652 +               sc->desc_curr_tx = (sc->desc_curr_tx+1) % ECE_MAX_TX_BUFFERS;
2653 +               if (sc->desc_curr_tx==0) {
2654 +                       desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
2655 +               }
2656 +       }
2657 +
2658 +       desc = sc->tx_desc[sc->tx_prod].desc;
2659 +
2660 +       sc->tx_prod = (sc->tx_prod+1) % ECE_MAX_TX_BUFFERS;
2661 +
2662 +       /*after all descriptors are set, we set the flag to start the
2663 +        * sending process */
2664 +       for (seg = 0; seg < nsegs; seg++) {
2665 +               desc->cown = 0;
2666 +               desc++;
2667 +               desc_no = (desc_no+1) % ECE_MAX_TX_BUFFERS;
2668 +               if (desc_no==0) {
2669 +                       desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
2670 +               }
2671 +
2672 +       }
2673 +
2674 +       bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE);
2675 +       return (0);
2676 +}
2677 +
2678 +/*
2679 + * dequeu packets and transmit
2680 + */
2681 +static void
2682 +ecestart_locked(struct ifnet *ifp)
2683 +{
2684 +       struct ece_softc *sc;
2685 +       struct mbuf *m0;
2686 +       uint32_t queued = 0;
2687 +
2688 +       sc = ifp->if_softc;
2689 +       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2690 +           IFF_DRV_RUNNING)
2691 +               return;
2692 +
2693 +       bus_dmamap_sync(sc->dmatag_ring_tx,
2694 +           sc->dmamap_ring_tx,
2695 +           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2696 +
2697 +       for (;;) {
2698 +               /* Get packet from the queue */
2699 +               IF_DEQUEUE(&ifp->if_snd, m0);
2700 +               if (m0 == NULL)
2701 +                       break;
2702 +               if (ece_encap(sc, m0)) {
2703 +                       IF_PREPEND(&ifp->if_snd, m0);
2704 +                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2705 +                       break;
2706 +               }
2707 +               queued++;
2708 +               BPF_MTAP(ifp, m0);
2709 +       }
2710 +       if (queued) {
2711 +               /*sync the ring*/
2712 +               bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx,
2713 +                               BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2714 +               WR4(sc, TS_DMA_CONTROL, 1);
2715 +       }
2716 +}
2717 +
2718 +static void
2719 +eceinit(void *xsc)
2720 +{
2721 +       struct ece_softc *sc = xsc;
2722 +       ECE_LOCK(sc);
2723 +       eceinit_locked(sc);
2724 +       ECE_UNLOCK(sc);
2725 +}
2726 +
2727 +static void
2728 +ece_tx_task(void *arg, int pending __unused)
2729 +{
2730 +       struct ifnet *ifp;
2731 +       ifp = (struct ifnet *)arg;
2732 +       ecestart(ifp);
2733 +}
2734 +
2735 +static void
2736 +ecestart(struct ifnet *ifp)
2737 +{
2738 +       struct ece_softc *sc = ifp->if_softc;
2739 +       ECE_TXLOCK(sc);
2740 +       ecestart_locked(ifp);
2741 +       ECE_TXUNLOCK(sc);
2742 +}
2743 +
2744 +/*
2745 + * Turn off interrupts, and stop the nic.  Can be called with sc->ifp
2746 + * NULL so be careful.
2747 + */
2748 +static void
2749 +ecestop(struct ece_softc *sc)
2750 +{
2751 +       struct ifnet *ifp = sc->ifp;
2752 +       uint32_t mac_port_config;
2753 +
2754 +       WR4(sc, TS_DMA_CONTROL, 0);
2755 +       WR4(sc, FS_DMA_CONTROL, 0);
2756 +
2757 +       if (ifp) {
2758 +               ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2759 +       }
2760 +
2761 +       callout_stop(&sc->tick_ch);
2762 +
2763 +       /*Disable Port 0 */
2764 +       mac_port_config = RD4(sc, MAC_PORT_0_CONFIG);
2765 +       mac_port_config |= ((0x1 << 18));
2766 +       WR4(sc, MAC_PORT_0_CONFIG, mac_port_config);
2767 +
2768 +       /*Disable Port 1 */
2769 +       mac_port_config = RD4(sc, MAC_PORT_1_CONFIG);
2770 +       mac_port_config |= ((0x1 << 18));
2771 +       WR4(sc, MAC_PORT_1_CONFIG, mac_port_config);
2772 +
2773 +       /* disable all interrupt status sources */
2774 +       WR4(sc, INTERRUPT_MASK, 0x00001FFF);
2775 +
2776 +       /* clear previous interrupt sources */
2777 +       WR4(sc, INTERRUPT_STATUS, 0x00001FFF);
2778 +
2779 +       WR4(sc, SWITCH_CONFIG, initial_switch_config);
2780 +       WR4(sc, CPU_PORT_CONFIG, initial_cpu_config);
2781 +       WR4(sc, MAC_PORT_0_CONFIG, initial_port0_config);
2782 +       WR4(sc, MAC_PORT_1_CONFIG, initial_port1_config);
2783 +
2784 +       clear_mac_entries(sc, 1);
2785 +}
2786 +
2787 +static void
2788 +ece_restart(struct ece_softc *sc)
2789 +{
2790 +       struct ifnet *ifp = sc->ifp;
2791 +
2792 +       ifp->if_drv_flags |= IFF_DRV_RUNNING;
2793 +       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2794 +       /*enable port 0*/
2795 +       WR4(sc, PORT_0_CONFIG,
2796 +           RD4(sc, PORT_0_CONFIG) & ~((0x1 << 18)));
2797 +       WR4(sc, INTERRUPT_MASK, 0x00000000);
2798 +       WR4(sc, FS_DMA_CONTROL, 1);
2799 +       callout_reset(&sc->tick_ch, hz, ece_tick, sc);
2800 +}
2801 +
2802 +static void
2803 +set_filter(struct ece_softc *sc)
2804 +{
2805 +       struct ifnet            *ifp;
2806 +       struct ifmultiaddr      *ifma;
2807 +       uint32_t mac_port_config;
2808 +
2809 +       ifp = sc->ifp;
2810 +
2811 +       clear_mac_entries(sc, 0);
2812 +       if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
2813 +               mac_port_config = RD4(sc, MAC_PORT_0_CONFIG);
2814 +               mac_port_config &= (~(0x1 << 27));
2815 +               mac_port_config &= (~(0x1 << 26));
2816 +               WR4(sc, MAC_PORT_0_CONFIG, mac_port_config);
2817 +               return;
2818 +       }
2819 +       if_maddr_rlock(ifp);
2820 +       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2821 +               if (ifma->ifma_addr->sa_family != AF_LINK)
2822 +                       continue;
2823 +               add_mac_entry(sc,
2824 +                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
2825 +       }
2826 +       if_maddr_runlock(ifp);
2827 +}
2828 +
2829 +static int
2830 +eceioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2831 +{
2832 +       struct ece_softc *sc = ifp->if_softc;
2833 +       struct mii_data *mii;
2834 +       struct ifreq *ifr = (struct ifreq *)data;
2835 +       int mask, error = 0;
2836 +
2837 +       switch (cmd) {
2838 +       case SIOCSIFFLAGS:
2839 +               ECE_LOCK(sc);
2840 +               if ((ifp->if_flags & IFF_UP) == 0 &&
2841 +                   ifp->if_drv_flags & IFF_DRV_RUNNING) {
2842 +                       ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2843 +                       ecestop(sc);
2844 +               } else {
2845 +                       /* reinitialize card on any parameter change */
2846 +                       if ((ifp->if_flags & IFF_UP) &&
2847 +                           !(ifp->if_drv_flags & IFF_DRV_RUNNING)
2848 +                          ){
2849 +                               ece_restart(sc);
2850 +                       }
2851 +               }
2852 +               ECE_UNLOCK(sc);
2853 +               break;
2854 +
2855 +       case SIOCADDMULTI:
2856 +       case SIOCDELMULTI:
2857 +               ECE_LOCK(sc);
2858 +               set_filter(sc);
2859 +               ECE_UNLOCK(sc);
2860 +               break;
2861 +
2862 +       case SIOCSIFMEDIA:
2863 +       case SIOCGIFMEDIA:
2864 +               mii = device_get_softc(sc->miibus);
2865 +               error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2866 +               break;
2867 +       case SIOCSIFCAP: