initial commit
[freebsd-arm:freebsd-arm.git] / clean_diff
1 Index: arm/arm/elf_trampoline.c
2 ===================================================================
3 --- arm/arm/elf_trampoline.c    (revision 201358)
4 +++ arm/arm/elf_trampoline.c    (working copy)
5 @@ -57,6 +57,8 @@
6  #define cpu_idcache_wbinv_all  arm8_cache_purgeID
7  #elif defined(CPU_ARM9)
8  #define cpu_idcache_wbinv_all  arm9_idcache_wbinv_all
9 +#elif defined(CPU_FA526)
10 +#define cpu_idcache_wbinv_all  fa526_idcache_wbinv_all
11  #elif defined(CPU_ARM9E)
12  #define cpu_idcache_wbinv_all  armv5_ec_idcache_wbinv_all
13  #elif defined(CPU_ARM10)
14 Index: arm/arm/cpufunc.c
15 ===================================================================
16 --- arm/arm/cpufunc.c   (revision 201358)
17 +++ arm/arm/cpufunc.c   (working copy)
18 @@ -781,6 +781,73 @@
19         xscale_setup                    /* cpu setup            */
20  };
21  #endif /* CPU_XSCALE_81342 */
22 +
23 +
24 +#if defined(CPU_FA526)
25 +struct cpu_functions fa526_cpufuncs = {
26 +       /* CPU functions */
27 +
28 +       .cf_id                  = cpufunc_id,
29 +       .cf_cpwait              = cpufunc_nullop,
30 +
31 +       /* MMU functions */
32 +
33 +       .cf_control             = cpufunc_control,
34 +       .cf_domains             = cpufunc_domains,
35 +       .cf_setttb              = fa526_setttb,
36 +       .cf_faultstatus         = cpufunc_faultstatus,
37 +       .cf_faultaddress        = cpufunc_faultaddress,
38 +
39 +       /* TLB functions */
40 +
41 +       .cf_tlb_flushID         = armv4_tlb_flushID,
42 +       .cf_tlb_flushID_SE      = fa526_tlb_flushID_SE,
43 +       .cf_tlb_flushI          = armv4_tlb_flushI,
44 +       .cf_tlb_flushI_SE       = fa526_tlb_flushI_SE,
45 +       .cf_tlb_flushD          = armv4_tlb_flushD,
46 +       .cf_tlb_flushD_SE       = armv4_tlb_flushD_SE,
47 +
48 +       /* Cache operations */
49 +
50 +       .cf_icache_sync_all     = fa526_icache_sync_all,
51 +       .cf_icache_sync_range   = fa526_icache_sync_range,
52 +
53 +       .cf_dcache_wbinv_all    = fa526_dcache_wbinv_all,
54 +       .cf_dcache_wbinv_range  = fa526_dcache_wbinv_range,
55 +       .cf_dcache_inv_range    = fa526_dcache_inv_range,
56 +       .cf_dcache_wb_range     = fa526_dcache_wb_range,
57 +
58 +       .cf_idcache_wbinv_all   = fa526_idcache_wbinv_all,
59 +       .cf_idcache_wbinv_range = fa526_idcache_wbinv_range,
60 +
61 +
62 +       .cf_l2cache_wbinv_all = cpufunc_nullop,
63 +       .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
64 +       .cf_l2cache_inv_range = (void *)cpufunc_nullop,
65 +       .cf_l2cache_wb_range = (void *)cpufunc_nullop,
66 +
67 +
68 +       /* Other functions */
69 +
70 +       .cf_flush_prefetchbuf   = fa526_flush_prefetchbuf,
71 +       .cf_drain_writebuf      = armv4_drain_writebuf,
72 +       .cf_flush_brnchtgt_C    = cpufunc_nullop,
73 +       .cf_flush_brnchtgt_E    = fa526_flush_brnchtgt_E,
74 +
75 +       .cf_sleep               = fa526_cpu_sleep,
76 +
77 +       /* Soft functions */
78 +
79 +       .cf_dataabt_fixup       = cpufunc_null_fixup,
80 +       .cf_prefetchabt_fixup   = cpufunc_null_fixup,
81 +
82 +       .cf_context_switch      = fa526_context_switch,
83 +
84 +       .cf_setup               = fa526_setup
85 +};
86 +#endif /* CPU_FA526 */
87 +
88 +
89  /*
90   * Global constants also used by locore.s
91   */
92 @@ -793,6 +860,7 @@
93    defined (CPU_ARM9E) || defined (CPU_ARM10) ||                               \
94    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||           \
95    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||         \
96 +  defined(CPU_FA526) ||                                               \
97    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
98  
99  static void get_cachetype_cp15(void);
100 @@ -1073,6 +1141,19 @@
101                 goto out;
102         }
103  #endif /* CPU_SA1110 */
104 +#ifdef CPU_FA526
105 +       if (cputype == CPU_ID_FA526) {
106 +               cpufuncs = fa526_cpufuncs;
107 +               cpu_reset_needs_v4_MMU_disable = 1;     /* FA526 needs it */
108 +               get_cachetype_cp15();
109 +               pmap_pte_init_generic();
110 +
111 +               /* Use powersave on this CPU. */
112 +               cpu_do_powersave = 1;
113 +
114 +               goto out;
115 +       }
116 +#endif /* CPU_FA526 */
117  #ifdef CPU_IXP12X0
118          if (cputype == CPU_ID_IXP1200) {
119                  cpufuncs = ixp12x0_cpufuncs;
120 @@ -1547,7 +1628,8 @@
121    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||            \
122    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||          \
123    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
124 -  defined(CPU_ARM10) ||  defined(CPU_ARM11)
125 +  defined(CPU_ARM10) ||  defined(CPU_ARM11) || \
126 +  defined(CPU_FA526)
127  
128  #define IGN    0
129  #define OR     1
130 @@ -2013,6 +2095,62 @@
131  }
132  #endif /* CPU_SA1100 || CPU_SA1110 */
133  
134 +#if defined(CPU_FA526)
135 +struct cpu_option fa526_options[] = {
136 +#ifdef COMPAT_12
137 +       { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE |
138 +                                          CPU_CONTROL_DC_ENABLE) },
139 +       { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
140 +#endif /* COMPAT_12 */
141 +       { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE |
142 +                                          CPU_CONTROL_DC_ENABLE) },
143 +       { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE |
144 +                                          CPU_CONTROL_DC_ENABLE) },
145 +       { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
146 +       { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
147 +       { NULL,                 IGN, IGN, 0 }
148 +};
149 +
150 +void
151 +fa526_setup(char *args)
152 +{
153 +       int cpuctrl, cpuctrlmask;
154 +
155 +       cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
156 +                | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
157 +                | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
158 +                | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
159 +       cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
160 +                | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
161 +                | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
162 +                | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
163 +                | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
164 +                | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
165 +                | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
166 +
167 +#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
168 +       cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
169 +#endif
170 +
171 +       cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
172 +
173 +#ifdef __ARMEB__
174 +       cpuctrl |= CPU_CONTROL_BEND_ENABLE;
175 +#endif
176 +
177 +       if (vector_page == ARM_VECTORS_HIGH)
178 +               cpuctrl |= CPU_CONTROL_VECRELOC;
179 +
180 +       /* Clear out the cache */
181 +       cpu_idcache_wbinv_all();
182 +
183 +       /* Set the control register */
184 +       ctrl = cpuctrl;
185 +       cpu_control(0xffffffff, cpuctrl);
186 +}
187 +#endif /* CPU_FA526 */
188 +
189 +
190  #if defined(CPU_IXP12X0)
191  struct cpu_option ixp12x0_options[] = {
192         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
193 Index: arm/arm/cpufunc_asm_fa526.S
194 ===================================================================
195 --- arm/arm/cpufunc_asm_fa526.S (revision 0)
196 +++ arm/arm/cpufunc_asm_fa526.S (revision 0)
197 @@ -0,0 +1,208 @@
198 +/*     $NetBSD: cpufunc_asm_fa526.S,v 1.3 2008/10/15 16:56:49 matt Exp $*/
199 +/*-
200 + * Copyright (c) 2008 The NetBSD Foundation, Inc.
201 + * All rights reserved.
202 + *
203 + * This code is derived from software contributed to The NetBSD Foundation
204 + * by Matt Thomas <matt@3am-software.com>
205 + *
206 + * Redistribution and use in source and binary forms, with or without
207 + * modification, are permitted provided that the following conditions
208 + * are met:
209 + * 1. Redistributions of source code must retain the above copyright
210 + *    notice, this list of conditions and the following disclaimer.
211 + * 2. Redistributions in binary form must reproduce the above copyright
212 + *    notice, this list of conditions and the following disclaimer in the
213 + *    documentation and/or other materials provided with the distribution.
214 + *
215 + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
216 + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
217 + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
218 + * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
219 + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
220 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
221 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
222 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
223 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
224 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
225 + * POSSIBILITY OF SUCH DAMAGE.
226 + */
227 +
228 +
229 +#include <machine/asm.h>
230 +
231 +#define        CACHELINE_SIZE  16
232 +
233 +ENTRY(fa526_setttb)
234 +       mov     r1, #0
235 +       mcr     p15, 0, r1, c7, c14, 0  /* clean and invalidate D$ */
236 +       mcr     p15, 0, r1, c7, c5, 0   /* invalidate I$ */
237 +       mcr     p15, 0, r1, c7, c5, 6   /* invalidate BTB */
238 +       mcr     p15, 0, r1, c7, c10, 4  /* drain write and fill buffer */
239 +
240 +       mcr     p15, 0, r0, c2, c0, 0   /* Write the TTB */
241 +
242 +       /* If we have updated the TTB we must flush the TLB */
243 +       mcr     p15, 0, r1, c8, c7, 0   /* invalidate I+D TLB */
244 +
245 +       /* Make sure that pipeline is emptied */
246 +       mov     r0, r0
247 +       mov     r0, r0
248 +       mov     pc, lr
249 +
250 +/*
251 + * TLB functions
252 + */
253 +ENTRY(fa526_tlb_flushID_SE)
254 +       mcr     p15, 0, r0, c8, c7, 1   /* flush Utlb single entry */
255 +       mov     pc, lr
256 +
257 +/*
258 + * TLB functions
259 + */
260 +ENTRY(fa526_tlb_flushI_SE)
261 +       mcr     p15, 0, r0, c8, c5, 1   /* flush Itlb single entry */
262 +       mov     pc, lr
263 +
264 +ENTRY(fa526_cpu_sleep)
265 +       mov     r0, #0
266 +/*     nop
267 +       nop*/
268 +       mcr     p15, 0, r0, c7, c0, 4   /* Wait for interrupt*/
269 +       mov     pc, lr
270 +
271 +ENTRY(fa526_flush_prefetchbuf)
272 +       mov     r0, #0
273 +       mcr     p15, 0, r0, c7, c5, 4   /* Pre-fetch flush */
274 +       mov     pc, lr
275 +
276 +/*
277 + * Cache functions
278 + */
279 +ENTRY(fa526_idcache_wbinv_all)
280 +       mov     r0, #0
281 +       mcr     p15, 0, r0, c7, c14, 0  /* clean and invalidate D$ */
282 +       mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ */
283 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
284 +       mov     pc, lr
285 +
286 +ENTRY(fa526_icache_sync_all)
287 +       mov     r0, #0
288 +       mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ */
289 +       mov     pc, lr
290 +
291 +ENTRY(fa526_dcache_wbinv_all)
292 +       mov     r0, #0
293 +       mcr     p15, 0, r0, c7, c14, 0  /* clean and invalidate D$ */
294 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
295 +       mov     pc, lr
296 +
297 +/*
298 + * Ranged cache functions
299 + */
300 +ENTRY(fa526_dcache_wbinv_range)
301 +       cmp     r1, #0x4000
302 +       bhs     _C_LABEL(fa526_dcache_wbinv_all)
303 +
304 +       and     r2, r0, #(CACHELINE_SIZE - 1)
305 +       add     r1, r1, r2
306 +       bic     r0, r0, #(CACHELINE_SIZE - 1)
307 +
308 +1:     mcr     p15, 0, r0, c7, c14, 1  /* clean and invalidate D$ entry */
309 +       add     r0, r0, #CACHELINE_SIZE
310 +       subs    r1, r1, #CACHELINE_SIZE
311 +       bhi     1b
312 +
313 +       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
314 +       mov     pc, lr
315 +
316 +ENTRY(fa526_dcache_wb_range)
317 +       cmp     r1, #0x4000
318 +       bls     1f
319 +
320 +       mov     r0, #0
321 +       mcr     p15, 0, r0, c7, c10, 0  /* clean entire D$ */
322 +       b       3f
323 +
324 +1:     and     r2, r0, #(CACHELINE_SIZE - 1)
325 +       add     r1, r1, r2
326 +       bic     r0, r0, #(CACHELINE_SIZE - 1)
327 +
328 +2:     mcr     p15, 0, r0, c7, c10, 1  /* clean D$ entry */
329 +       add     r0, r0, #CACHELINE_SIZE
330 +       subs    r1, r1, #CACHELINE_SIZE
331 +       bhi     2b
332 +
333 +3:     mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
334 +       mov     pc, lr
335 +
336 +ENTRY(fa526_dcache_inv_range)
337 +       and     r2, r0, #(CACHELINE_SIZE - 1)
338 +       add     r1, r1, r2
339 +       bic     r0, r0, #(CACHELINE_SIZE - 1)
340 +
341 +1:     mcr     p15, 0, r0, c7, c6, 1   /* invalidate D$ single entry */
342 +       add     r0, r0, #CACHELINE_SIZE
343 +       subs    r1, r1, #CACHELINE_SIZE
344 +       bhi     1b
345 +
346 +       mov     pc, lr
347 +
348 +ENTRY(fa526_idcache_wbinv_range)
349 +       cmp     r1, #0x4000
350 +       bhs     _C_LABEL(fa526_idcache_wbinv_all)
351 +
352 +       and     r2, r0, #(CACHELINE_SIZE - 1)
353 +       add     r1, r1, r2
354 +       bic     r0, r0, #(CACHELINE_SIZE - 1)
355 +
356 +1:     mcr     p15, 0, r0, c7, c14, 1  /* clean and invalidate D$ entry */
357 +       mcr     p15, 0, r0, c7, c5, 1   /* invalidate I$ entry */
358 +       add     r0, r0, #CACHELINE_SIZE
359 +       subs    r1, r1, #CACHELINE_SIZE
360 +       bhi     1b
361 +
362 +2:     mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
363 +       mov     pc, lr
364 +
365 +ENTRY(fa526_icache_sync_range)
366 +       cmp     r1, #0x4000
367 +       bhs     _C_LABEL(fa526_icache_sync_all)
368 +
369 +       and     r2, r0, #(CACHELINE_SIZE - 1)
370 +       add     r1, r1, r2
371 +       bic     r0, r0, #(CACHELINE_SIZE - 1)
372 +
373 +1:     mcr     p15, 0, r0, c7, c10, 1  /* clean D$ entry */
374 +       mcr     p15, 0, r0, c7, c5, 1   /* invalidate I$ entry */
375 +       add     r0, r0, #CACHELINE_SIZE
376 +       subs    r1, r1, #CACHELINE_SIZE
377 +       bhi     1b
378 +
379 +2:     mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
380 +       mov     pc, lr
381 +
382 +ENTRY(fa526_flush_brnchtgt_E)
383 +       mov     r0, #0
384 +       mcr     p15, 0, r0, c7, c5, 6   /* invalidate BTB cache */
385 +       mov     pc, lr
386 +
387 +ENTRY(fa526_context_switch)
388 +       /*
389 +        * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
390 +        * Thus the data cache will contain only kernel data and the
391 +        * instruction cache will contain only kernel code, and all
392 +        * kernel mappings are shared by all processes.
393 +        */
394 +
395 +       mcr     p15, 0, r0, c2, c0, 0   /* Write the TTB */
396 +
397 +       /* If we have updated the TTB we must flush the TLB */
398 +       mov     r0, #0
399 +       mcr     p15, 0, r0, c8, c7, 0   /* flush the I+D tlb */
400 +
401 +       /* Make sure that pipeline is emptied */
402 +       mov     r0, r0
403 +       mov     r0, r0
404 +       mov     pc, lr
405 +
406 Index: arm/include/cpuconf.h
407 ===================================================================
408 --- arm/include/cpuconf.h       (revision 201358)
409 +++ arm/include/cpuconf.h       (working copy)
410 @@ -61,6 +61,7 @@
411                          defined(CPU_XSCALE_80200) +                    \
412                          defined(CPU_XSCALE_80321) +                    \
413                          defined(CPU_XSCALE_PXA2X0) +                   \
414 +                        defined(CPU_FA526) +                           \
415                          defined(CPU_XSCALE_IXP425))
416  
417  /*
418 @@ -68,7 +69,7 @@
419   */
420  #if (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||        \
421       defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
422 -    defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425))
423 +     defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425) || defined(CPU_FA526))
424  #define        ARM_ARCH_4      1
425  #else
426  #define        ARM_ARCH_4      0
427 @@ -125,7 +126,7 @@
428  
429  #if (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) ||        \
430       defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) ||   \
431 -     defined(CPU_ARM10) || defined(CPU_ARM11))
432 +     defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_FA526))
433  #define        ARM_MMU_GENERIC         1
434  #else
435  #define        ARM_MMU_GENERIC         0
436 Index: arm/include/cpufunc.h
437 ===================================================================
438 --- arm/include/cpufunc.h       (revision 201358)
439 +++ arm/include/cpufunc.h       (working copy)
440 @@ -283,6 +283,28 @@
441  u_int  arm8_clock_config       (u_int, u_int);
442  #endif
443  
444 +
445 +#ifdef CPU_FA526
446 +void   fa526_setup             (char *arg);
447 +void   fa526_setttb            (u_int ttb);
448 +void   fa526_context_switch    (void);
449 +void   fa526_cpu_sleep         (int);
450 +void   fa526_tlb_flushI_SE     (u_int);
451 +void   fa526_tlb_flushID_SE    (u_int);
452 +void   fa526_flush_prefetchbuf (void);
453 +void   fa526_flush_brnchtgt_E  (u_int);
454 +
455 +void   fa526_icache_sync_all   (void);
456 +void   fa526_icache_sync_range(vm_offset_t start, vm_size_t end);
457 +void   fa526_dcache_wbinv_all  (void);
458 +void   fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t end);
459 +void   fa526_dcache_inv_range  (vm_offset_t start, vm_size_t end);
460 +void   fa526_dcache_wb_range   (vm_offset_t start, vm_size_t end);
461 +void   fa526_idcache_wbinv_all(void);
462 +void   fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
463 +#endif
464 +
465 +
466  #ifdef CPU_SA110
467  void   sa110_setup             (char *string);
468  void   sa110_context_switch    (void);
469 @@ -445,6 +467,7 @@
470  #if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
471    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
472    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||         \
473 +    defined(CPU_FA526) || \
474    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||       \
475    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
476    
477 Index: arm/conf/CNS11XXNAS.hints
478 ===================================================================
479 --- arm/conf/CNS11XXNAS.hints   (revision 0)
480 +++ arm/conf/CNS11XXNAS.hints   (revision 0)
481 @@ -0,0 +1 @@
482 +#currently no hints
483 Index: arm/conf/CNS11XXNAS
484 ===================================================================
485 --- arm/conf/CNS11XXNAS (revision 0)
486 +++ arm/conf/CNS11XXNAS (revision 0)
487 @@ -0,0 +1,126 @@
488 +# CNS11XXNAS -  StarSemi STR9104/Cavium CNS1102 NAS
489 +# kernel configuration file for FreeBSD/arm
490 +#
491 +# For more information on this file, please read the handbook section on
492 +# Kernel Configuration Files:
493 +#
494 +#    http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
495 +#
496 +# The handbook is also available locally in /usr/share/doc/handbook
497 +# if you've installed the doc distribution, otherwise always see the
498 +# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the
499 +# latest information.
500 +#
501 +# An exhaustive list of options and more detailed explanations of the
502 +# device lines is also present in the ../../conf/NOTES and NOTES files. 
503 +# If you are in doubt as to the purpose or necessity of a line, check first 
504 +# in NOTES.
505 +#
506 +# $FreeBSD$
507 +
508 +ident          CNS11XXNAS
509 +
510 +#options       PHYSADDR=0x10000000
511 +#options       KERNPHYSADDR=0x10200000
512 +#options       KERNVIRTADDR=0xc0200000         # Used in ldscript.arm
513 +#options       FLASHADDR=0x50000000
514 +#options       LOADERRAMADDR=0x00000000
515 +#options       STARTUP_PAGETABLE_ADDR=0x10000000
516 +
517 +include                "../econa/std.econa"
518 +
519 +#To statically compile in device wiring instead of /boot/device.hints
520 +hints          "CNS11XXNAS.hints"      #Default places to look for devices.
521 +makeoptions    MODULES_OVERRIDE=""
522 +
523 +makeoptions    DEBUG=-g                #Build kernel with gdb(1) debug symbols
524 +options        HZ=100
525 +options        DEVICE_POLLING
526 +
527 +# Debugging for use in -current
528 +options        KDB
529 +#options       GDB
530 +options        DDB                     #Enable the kernel debugger
531 +#options       INVARIANTS              #Enable calls of extra sanity checking
532 +#options       INVARIANT_SUPPORT       #Extra sanity checks of internal structures, required by INVARIANTS
533 +#options       WITNESS         #Enable checks to detect deadlocks and cycles
534 +##options      WITNESS_SKIPSPIN        #Don't run witness on spinlocks for speed
535 +#options       DIAGNOSTIC
536 +
537 +
538 +#options               COMPAT_FREEBSD5
539 +#options               COMPAT_FREEBSD6
540 +#options               COMPAT_FREEBSD7
541 +
542 +
543 +options        SCHED_ULE               #ULE scheduler
544 +#options       SCHED_4BSD              #4BSD scheduler
545 +options        GEOM_PART_GPT           # GUID Partition Tables.
546 +#options       GEOM_PART_EBR
547 +#options       GEOM_PART_EBR_COMPAT
548 +options        GEOM_LABEL              # Provides labelization
549 +
550 +
551 +options        INET                    #InterNETworking
552 +options        INET6                   #IPv6 communications protocols
553 +options        FFS                     #Berkeley Fast Filesystem
554 +options        SOFTUPDATES             #Enable FFS soft updates support
555 +options        UFS_ACL                 #Support for access control lists
556 +options        UFS_DIRHASH             #Improve performance on big directories
557 +options        NFSCLIENT               #Network Filesystem Client
558 +#options       NFSSERVER               #Network Filesystem Server
559 +#options       NFSLOCKD                #Network Lock Manager
560 +options        NFS_ROOT                #NFS usable as /, requires NFSCLIENT
561 +options        MSDOSFS                 #MSDOS Filesystem
562 +#options       CD9660                  #ISO 9660 Filesystem
563 +#options       PROCFS                  #Process filesystem (requires PSEUDOFS)
564 +options        PSEUDOFS                #Pseudo-filesystem framework
565 +options        SCSI_DELAY=5000         #Delay (in ms) before probing SCSI
566 +options        KTRACE                  #ktrace(1) support
567 +options        SYSVSHM                 #SYSV-style shared memory
568 +options        SYSVMSG                 #SYSV-style message queues
569 +options        SYSVSEM                 #SYSV-style semaphores
570 +options        _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions
571 +options        MUTEX_NOINLINE          #Mutex inlines are space hogs
572 +options        RWLOCK_NOINLINE         #rwlock inlines are space hogs
573 +options        SX_NOINLINE             #sx inlines are space hogs
574 +#options       BOOTP
575 +#options       BOOTP_NFSROOT
576 +#options       BOOTP_NFSV3
577 +#options       BOOTP_WIRED_TO=npe0
578 +#options       BOOTP_COMPAT
579 +
580 +#device                pci
581 +device         uart
582 +
583 +
584 +device         firmware
585 +device         mii             # Minimal mii routines
586 +device         ether
587 +device         bpf
588 +
589 +device         pty
590 +device         loop
591 +
592 +device         md
593 +device          random          # Entropy device
594 +
595 +#options       ARM_USE_SMALL_ALLOC
596 +
597 +device         usb
598 +#options       USB_DEBUG
599 +device         ohci
600 +device         ehci
601 +device         umass
602 +device         scbus           # SCSI bus (required for SCSI)
603 +device         da              # Direct Access (disks)
604 +device         pass
605 +device                 cfi
606 +
607 +#device                udav            # Davicom DM9601E USB
608 +
609 +device         geom_label
610 +device         geom_journal
611 +device                 geom_part_bsd
612 +
613 +options                ROOTDEVNAME=\"ufs:da0s1a\"
614 Index: arm/econa/econa_var.h
615 ===================================================================
616 --- arm/econa/econa_var.h       (revision 0)
617 +++ arm/econa/econa_var.h       (revision 0)
618 @@ -0,0 +1,50 @@
619 +/*-
620 + * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>.
621 + * All rights reserved.
622 + *
623 + * Redistribution and use in source and binary forms, with or without
624 + * modification, are permitted provided that the following conditions
625 + * are met:
626 + * 1. Redistributions of source code must retain the above copyright
627 + *    notice, this list of conditions and the following disclaimer.
628 + * 2. Redistributions in binary form must reproduce the above copyright
629 + *    notice, this list of conditions and the following disclaimer in the
630 + *    documentation and/or other materials provided with the distribution.
631 + *
632 + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
633 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
634 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
635 + * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
636 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
637 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
638 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
639 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
640 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
641 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
642 + * SUCH DAMAGE.
643 + */
644 +
645 +#ifndef        _ARM_ECONA_VAR_H
646 +#define        _ARM_ECONA_VAR_H
647 +
648 +extern bus_space_tag_t obio_tag;
649 +
650 +struct econa_softc {
651 +       device_t dev;
652 +       bus_space_tag_t ec_st;
653 +       bus_space_handle_t ec_sh;
654 +       bus_space_handle_t ec_sys_sh;
655 +       bus_space_handle_t ec_system_sh;
656 +       struct rman ec_irq_rman;
657 +       struct rman ec_mem_rman;
658 +};
659 +
660 +struct econa_ivar {
661 +       struct resource_list resources;
662 +};
663 +
664 +void   power_on_network_interface      (void);
665 +unsigned int   get_tclk        (void);
666 +
667 +
668 +#endif
669 Index: arm/econa/files.econa
670 ===================================================================
671 --- arm/econa/files.econa       (revision 0)
672 +++ arm/econa/files.econa       (revision 0)
673 @@ -0,0 +1,14 @@
674 +# $FreeBSD$
675 +arm/arm/cpufunc_asm_fa526.S    standard
676 +arm/econa/econa_machdep.c              standard
677 +arm/econa/econa.c                      standard
678 +arm/econa/timer.c                      standard
679 +arm/econa/uart_bus_ec.c                optional        uart
680 +arm/econa/uart_cpu_ec.c                optional        uart
681 +dev/uart/uart_dev_ns8250.c     optional        uart
682 +arm/arm/irq_dispatch.S         standard
683 +arm/arm/bus_space_generic.c            standard
684 +arm/econa/ehci_ebus.c  optional        ehci
685 +arm/econa/ohci_ec.c    optional        ohci
686 +arm/econa/if_ece.c             standard
687 +arm/econa/cfi_bus_econa.c              optional        cfi
688 Index: arm/econa/econa_machdep.c
689 ===================================================================
690 --- arm/econa/econa_machdep.c   (revision 0)
691 +++ arm/econa/econa_machdep.c   (revision 0)
692 @@ -0,0 +1,396 @@
693 +/*-
694 + * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
695 + * Copyright (c) 1994-1998 Mark Brinicombe.
696 + * Copyright (c) 1994 Brini.
697 + * All rights reserved.
698 + *
699 + * This code is derived from software written for Brini by Mark Brinicombe
700 + *
701 + * Redistribution and use in source and binary forms, with or without
702 + * modification, are permitted provided that the following conditions
703 + * are met:
704 + * 1. Redistributions of source code must retain the above copyright
705 + *    notice, this list of conditions and the following disclaimer.
706 + * 2. Redistributions in binary form must reproduce the above copyright
707 + *    notice, this list of conditions and the following disclaimer in the
708 + *    documentation and/or other materials provided with the distribution.
709 + * 3. All advertising materials mentioning features or use of this software
710 + *    must display the following acknowledgement:
711 + *      This product includes software developed by Brini.
712 + * 4. The name of the company nor the name of the author may be used to
713 + *    endorse or promote products derived from this software without specific
714 + *    prior written permission.
715 + *
716 + * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
717 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
718 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
719 + * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
720 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
721 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
722 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
723 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
724 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
725 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
726 + * SUCH DAMAGE.
727 + *
728 + */
729 +
730 +#include "opt_msgbuf.h"
731 +
732 +#include <sys/cdefs.h>
733 +__FBSDID("$FreeBSD$");
734 +
735 +#define        _ARM32_BUS_DMA_PRIVATE
736 +#include <sys/param.h>
737 +#include <sys/systm.h>
738 +#include <sys/sysproto.h>
739 +#include <sys/signalvar.h>
740 +#include <sys/imgact.h>
741 +#include <sys/kernel.h>
742 +#include <sys/ktr.h>
743 +#include <sys/linker.h>
744 +#include <sys/lock.h>
745 +#include <sys/malloc.h>
746 +#include <sys/mutex.h>
747 +#include <sys/pcpu.h>
748 +#include <sys/proc.h>
749 +#include <sys/ptrace.h>
750 +#include <sys/cons.h>
751 +#include <sys/bio.h>
752 +#include <sys/bus.h>
753 +#include <sys/buf.h>
754 +#include <sys/exec.h>
755 +#include <sys/kdb.h>
756 +#include <sys/msgbuf.h>
757 +#include <machine/reg.h>
758 +#include <machine/cpu.h>
759 +
760 +#include <vm/vm.h>
761 +#include <vm/pmap.h>
762 +#include <vm/vm_object.h>
763 +#include <vm/vm_page.h>
764 +#include <vm/vm_pager.h>
765 +#include <vm/vm_map.h>
766 +#include <vm/vnode_pager.h>
767 +#include <machine/pmap.h>
768 +#include <machine/vmparam.h>
769 +#include <machine/pcb.h>
770 +#include <machine/undefined.h>
771 +#include <machine/machdep.h>
772 +#include <machine/metadata.h>
773 +#include <machine/armreg.h>
774 +#include <machine/bus.h>
775 +#include <sys/reboot.h>
776 +#include "econa_reg.h"
777 +
778 +/* Page table for mapping proc0 zero page */
779 +#define        KERNEL_PT_SYS           0
780 +#define        KERNEL_PT_KERN          1
781 +#define        KERNEL_PT_KERN_NUM      22
782 +/* L2 table for mapping after kernel */
783 +#define        KERNEL_PT_AFKERNEL      KERNEL_PT_KERN + KERNEL_PT_KERN_NUM
784 +#define        KERNEL_PT_AFKERNEL_NUM  5
785 +
786 +/* this should be evenly divisible by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
787 +#define        NUM_KERNEL_PTS  (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)
788 +
789 +/* Define various stack sizes in pages */
790 +#define        IRQ_STACK_SIZE  1
791 +#define        ABT_STACK_SIZE  1
792 +#define        UND_STACK_SIZE  1
793 +
794 +extern u_int data_abort_handler_address;
795 +extern u_int prefetch_abort_handler_address;
796 +extern u_int undefined_handler_address;
797 +
798 +struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
799 +
800 +extern void *_end;
801 +
802 +extern int *end;
803 +
804 +struct pcpu __pcpu;
805 +struct pcpu *pcpup = &__pcpu;
806 +
807 +/* Physical and virtual addresses for some global pages */
808 +
809 +vm_paddr_t phys_avail[10];
810 +vm_paddr_t dump_avail[4];
811 +vm_offset_t physical_pages;
812 +
813 +struct pv_addr systempage;
814 +struct pv_addr msgbufpv;
815 +struct pv_addr irqstack;
816 +struct pv_addr undstack;
817 +struct pv_addr abtstack;
818 +struct pv_addr kernelstack;
819 +
820 +static void *boot_arg1;
821 +static void *boot_arg2;
822 +
823 +static struct trapframe proc0_tf;
824 +
/* Static device mappings. */
/*
 * Table of virtual/physical ranges that pmap_devmap_bootstrap() wires
 * into the kernel L1 page table from initarm(), before the VM system
 * is running.  Every entry is mapped read/write and non-cacheable; the
 * table is terminated by an all-zero entry.
 */
static const struct pmap_devmap econa_devmap[] = {
	{
		/*
		 * This maps DDR SDRAM
		 */
		ECONA_SDRAM_BASE, /*virtual*/
		ECONA_SDRAM_BASE, /*physical*/
		ECONA_SDRAM_SIZE, /*size*/
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	/*
	 * Map the on-board devices VA == PA so that we can access them
	 * with the MMU on or off.
	 */
	{
		/*
		 * This maps the interrupt controller, the UART
		 * and the timer.
		 */
		ECONA_IO_BASE, /*virtual*/
		ECONA_IO_BASE, /*physical*/
		ECONA_IO_SIZE, /*size*/
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		/*
		 * OHCI + EHCI
		 */
		ECONA_OHCI_VBASE, /*virtual*/
		ECONA_OHCI_PBASE, /*physical*/
		ECONA_USB_SIZE, /*size*/
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		/*
		 * CFI
		 */
		ECONA_CFI_VBASE, /*virtual*/
		ECONA_CFI_PBASE, /*physical*/
		ECONA_CFI_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		/* Terminating (all-zero) entry. */
		0,
		0,
		0,
		0,
		0,
	}
};
880 +
881 +
882 +void *
883 +initarm(void *arg, void *arg2)
884 +{
885 +       struct pv_addr  kernel_l1pt;
886 +       volatile uint32_t * ddr = (uint32_t *)0x4000000C;
887 +       int loop, i;
888 +       u_int l1pagetable;
889 +       vm_offset_t afterkern;
890 +       vm_offset_t freemempos;
891 +       vm_offset_t lastaddr;
892 +       uint32_t memsize;
893 +       int mem_info;
894 +
895 +
896 +       boot_arg1 = arg;
897 +       boot_arg2 = arg2;
898 +       boothowto = RB_VERBOSE;
899 +
900 +       set_cpufuncs();
901 +       lastaddr = fake_preload_metadata();
902 +       pcpu_init(pcpup, 0, sizeof(struct pcpu));
903 +       PCPU_SET(curthread, &thread0);
904 +
905 +
906 +       freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
907 +       /* Define a macro to simplify memory allocation */
908 +#define        valloc_pages(var, np)                   \
909 +       alloc_pages((var).pv_va, (np));         \
910 +       (var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);
911 +
912 +#define        alloc_pages(var, np)                    \
913 +       (var) = freemempos;             \
914 +       freemempos += (np * PAGE_SIZE);         \
915 +       memset((char *)(var), 0, ((np) * PAGE_SIZE));
916 +
917 +       while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
918 +               freemempos += PAGE_SIZE;
919 +       valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
920 +       for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
921 +               if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
922 +                       valloc_pages(kernel_pt_table[loop],
923 +                           L2_TABLE_SIZE / PAGE_SIZE);
924 +               } else {
925 +                       kernel_pt_table[loop].pv_va = freemempos -
926 +                           (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
927 +                           L2_TABLE_SIZE_REAL;
928 +                       kernel_pt_table[loop].pv_pa =
929 +                           kernel_pt_table[loop].pv_va - KERNVIRTADDR +
930 +                           KERNPHYSADDR;
931 +               }
932 +               i++;
933 +       }
934 +       /*
935 +        * Allocate a page for the system page mapped to V0x00000000
936 +        * This page will just contain the system vectors and can be
937 +        * shared by all processes.
938 +        */
939 +       valloc_pages(systempage, 1);
940 +
941 +       /* Allocate stacks for all modes */
942 +       valloc_pages(irqstack, IRQ_STACK_SIZE);
943 +       valloc_pages(abtstack, ABT_STACK_SIZE);
944 +       valloc_pages(undstack, UND_STACK_SIZE);
945 +       valloc_pages(kernelstack, KSTACK_PAGES);
946 +       valloc_pages(msgbufpv, round_page(MSGBUF_SIZE) / PAGE_SIZE);
947 +
948 +       /*
949 +        * Now we start construction of the L1 page table
950 +        * We start by mapping the L2 page tables into the L1.
951 +        * This means that we can replace L1 mappings later on if necessary
952 +        */
953 +       l1pagetable = kernel_l1pt.pv_va;
954 +
955 +       /* Map the L2 pages tables in the L1 page table */
956 +       pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
957 +           &kernel_pt_table[KERNEL_PT_SYS]);
958 +       for (i = 0; i < KERNEL_PT_KERN_NUM; i++)
959 +               pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
960 +                   &kernel_pt_table[KERNEL_PT_KERN + i]);
961 +       pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
962 +          (((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
963 +           VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
964 +       afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1));
965 +       for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
966 +               pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
967 +                   &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
968 +       }
969 +
970 +       /* Map the vector page. */
971 +       pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
972 +           VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
973 +
974 +
975 +       /* Map the stack pages */
976 +       pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
977 +           IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
978 +       pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
979 +           ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
980 +       pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
981 +           UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
982 +       pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
983 +           KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
984 +
985 +       pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
986 +           L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
987 +       pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
988 +           MSGBUF_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
989 +
990 +       for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
991 +               pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
992 +                   kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
993 +                   VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
994 +       }
995 +
996 +       pmap_devmap_bootstrap(l1pagetable, econa_devmap);
997 +       cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
998 +       setttb(kernel_l1pt.pv_pa);
999 +       cpu_tlb_flushID();
1000 +       cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
1001 +       cninit();
1002 +       mem_info = ((*ddr) >> 4) & 0x3;
1003 +       memsize = (8<<mem_info)*1024*1024;
1004 +       physmem = memsize / PAGE_SIZE;
1005 +
1006 +       /*
1007 +        * Pages were allocated during the secondary bootstrap for the
1008 +        * stacks for different CPU modes.
1009 +        * We must now set the r13 registers in the different CPU modes to
1010 +        * point to these stacks.
1011 +        * Since the ARM stacks use STMFD etc. we must set r13 to the top end
1012 +        * of the stack memory.
1013 +        */
1014 +       cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
1015 +
1016 +       set_stackptr(PSR_IRQ32_MODE,
1017 +           irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
1018 +       set_stackptr(PSR_ABT32_MODE,
1019 +           abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
1020 +       set_stackptr(PSR_UND32_MODE,
1021 +           undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
1022 +
1023 +       /*
1024 +        * We must now clean the cache again....
1025 +        * Cleaning may be done by reading new data to displace any
1026 +        * dirty data in the cache. This will have happened in setttb()
1027 +        * but since we are boot strapping the addresses used for the read
1028 +        * may have just been remapped and thus the cache could be out
1029 +        * of sync. A re-clean after the switch will cure this.
1030 +        * After booting there are no gross relocations of the kernel thus
1031 +        * this problem will not occur after initarm().
1032 +        */
1033 +       cpu_idcache_wbinv_all();
1034 +
1035 +       /* Set stack for exception handlers */
1036 +       data_abort_handler_address = (u_int)data_abort_handler;
1037 +       prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
1038 +       undefined_handler_address = (u_int)undefinedinstruction_bounce;
1039 +       undefined_init();
1040 +
1041 +       proc_linkup0(&proc0, &thread0);
1042 +       thread0.td_kstack = kernelstack.pv_va;
1043 +       thread0.td_pcb = (struct pcb *)
1044 +               (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
1045 +       thread0.td_pcb->pcb_flags = 0;
1046 +       thread0.td_frame = &proc0_tf;
1047 +       pcpup->pc_curpcb = thread0.td_pcb;
1048 +
1049 +       arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
1050 +
1051 +       pmap_curmaxkvaddr = afterkern + L1_S_SIZE * (KERNEL_PT_KERN_NUM - 1);
1052 +
1053 +       /*
1054 +        * ARM_USE_SMALL_ALLOC uses dump_avail, so it must be filled before
1055 +        * calling pmap_bootstrap.
1056 +        */
1057 +       dump_avail[0] = PHYSADDR;
1058 +       dump_avail[1] = PHYSADDR + memsize;
1059 +       dump_avail[2] = 0;
1060 +       dump_avail[3] = 0;
1061 +
1062 +       pmap_bootstrap(freemempos,
1063 +           KERNVIRTADDR + 3 * memsize,
1064 +           &kernel_l1pt);
1065 +
1066 +       msgbufp = (void*)msgbufpv.pv_va;
1067 +       msgbufinit(msgbufp, MSGBUF_SIZE);
1068 +
1069 +       mutex_init();
1070 +
1071 +       i = 0;
1072 +#if PHYSADDR != KERNPHYSADDR
1073 +       phys_avail[i++] = PHYSADDR;
1074 +       phys_avail[i++] = KERNPHYSADDR;
1075 +#endif
1076 +       phys_avail[i++] = virtual_avail - KERNVIRTADDR + KERNPHYSADDR;
1077 +
1078 +       phys_avail[i++] = PHYSADDR + memsize;
1079 +       phys_avail[i++] = 0;
1080 +       phys_avail[i++] = 0;
1081 +       /* Do basic tuning, hz etc */
1082 +       init_param1();
1083 +       init_param2(physmem);
1084 +       kdb_init();
1085 +
1086 +       return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
1087 +           sizeof(struct pcb)));
1088 +}
1089 Index: arm/econa/if_ece.c
1090 ===================================================================
1091 --- arm/econa/if_ece.c  (revision 0)
1092 +++ arm/econa/if_ece.c  (revision 0)
1093 @@ -0,0 +1,1948 @@
1094 +/*-
1095 + * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
1096 + * All rights reserved.
1097 + *
1098 + * Redistribution and use in source and binary forms, with or without
1099 + * modification, are permitted provided that the following conditions
1100 + * are met:
1101 + * 1. Redistributions of source code must retain the above copyright
1102 + *    notice, this list of conditions and the following disclaimer.
1103 + * 2. Redistributions in binary form must reproduce the above copyright
1104 + *    notice, this list of conditions and the following disclaimer in the
1105 + *    documentation and/or other materials provided with the distribution.
1106 + *
1107 + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1108 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1109 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1110 + * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
1111 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1112 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1113 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1114 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1115 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
1116 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
1117 + * SUCH DAMAGE.
1118 + */
1119 +
1120 +#include <sys/cdefs.h>
1121 +__FBSDID("$FreeBSD: src/sys/arm/econa/if_ece.c$");
1122 +
1123 +#include <sys/param.h>
1124 +#include <sys/systm.h>
1125 +#include <sys/bus.h>
1126 +#include <sys/kernel.h>
1127 +#include <sys/mbuf.h>
1128 +#include <sys/malloc.h>
1129 +#include <sys/module.h>
1130 +#include <sys/rman.h>
1131 +#include <sys/socket.h>
1132 +#include <sys/sockio.h>
1133 +#include <sys/sysctl.h>
1134 +#include <sys/taskqueue.h>
1135 +
1136 +#include <net/ethernet.h>
1137 +#include <net/if.h>
1138 +#include <net/if_arp.h>
1139 +#include <net/if_dl.h>
1140 +#include <net/if_media.h>
1141 +#include <net/if_types.h>
1142 +#include <net/if_vlan_var.h>
1143 +
1144 +#ifdef INET
1145 +#include <netinet/in.h>
1146 +#include <netinet/in_systm.h>
1147 +#include <netinet/in_var.h>
1148 +#include <netinet/ip.h>
1149 +#endif
1150 +
1151 +#include <net/bpf.h>
1152 +#include <net/bpfdesc.h>
1153 +
1154 +#include <dev/mii/mii.h>
1155 +#include <dev/mii/miivar.h>
1156 +
1157 +#include <arm/econa/if_ecereg.h>
1158 +#include <arm/econa/if_ecevar.h>
1159 +#include <arm/econa/econa_var.h>
1160 +
1161 +#include <machine/bus.h>
1162 +#include <machine/intr.h>
1163 +
1164 +/* "device miibus" required.  See GENERIC if you get errors here. */
1165 +#include "miibus_if.h"
1166 +
1167 +static uint8_t
1168 +vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19};
1169 +
1170 +/*
1171 + * Boot loader expects the hardware state to be the same when we
1172 + * restart the device (warm boot), so we need to save the initial
1173 + * config values.
1174 + */
1175 +int initial_switch_config;
1176 +int initial_cpu_config;
1177 +int initial_port0_config;
1178 +int initial_port1_config;
1179 +
1180 +static inline uint32_t
1181 +read_4(struct ece_softc *sc, bus_size_t off)
1182 +{
1183 +
1184 +       return (bus_read_4(sc->mem_res, off));
1185 +}
1186 +
1187 +static inline void
1188 +write_4(struct ece_softc *sc, bus_size_t off, uint32_t val)
1189 +{
1190 +
1191 +       bus_write_4(sc->mem_res, off, val);
1192 +}
1193 +
1194 +#define        ECE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
1195 +#define        ECE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
1196 +#define        ECE_LOCK_INIT(_sc) \
1197 +       mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),   \
1198 +                MTX_NETWORK_LOCK, MTX_DEF)
1199 +
1200 +#define        ECE_TXLOCK(_sc)         mtx_lock(&(_sc)->sc_mtx_tx)
1201 +#define        ECE_TXUNLOCK(_sc)               mtx_unlock(&(_sc)->sc_mtx_tx)
1202 +#define        ECE_TXLOCK_INIT(_sc) \
1203 +       mtx_init(&_sc->sc_mtx_tx, device_get_nameunit(_sc->dev),        \
1204 +                "ECE TX Lock", MTX_DEF)
1205 +
1206 +#define        ECE_CLEANUPLOCK(_sc)    mtx_lock(&(_sc)->sc_mtx_cleanup)
1207 +#define        ECE_CLEANUPUNLOCK(_sc)  mtx_unlock(&(_sc)->sc_mtx_cleanup)
1208 +#define        ECE_CLEANUPLOCK_INIT(_sc) \
1209 +       mtx_init(&_sc->sc_mtx_cleanup, device_get_nameunit(_sc->dev),   \
1210 +                "ECE cleanup Lock", MTX_DEF)
1211 +
1212 +#define        ECE_RXLOCK(_sc)         mtx_lock(&(_sc)->sc_mtx_rx)
1213 +#define        ECE_RXUNLOCK(_sc)               mtx_unlock(&(_sc)->sc_mtx_rx)
1214 +#define        ECE_RXLOCK_INIT(_sc) \
1215 +       mtx_init(&_sc->sc_mtx_rx, device_get_nameunit(_sc->dev),        \
1216 +                "ECE RX Lock", MTX_DEF)
1217 +
1218 +#define        ECE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
1219 +#define        ECE_TXLOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx_tx);
1220 +#define        ECE_RXLOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx_rx);
1221 +#define        ECE_CLEANUPLOCK_DESTROY(_sc)    \
1222 +       mtx_destroy(&_sc->sc_mtx_cleanup);
1223 +
1224 +#define        ECE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
1225 +#define        ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
1226 +
1227 +static devclass_t ece_devclass;
1228 +
1229 +/* ifnet entry points */
1230 +
1231 +static void    eceinit_locked(void *);
1232 +static void    ecestart_locked(struct ifnet *);
1233 +
1234 +static void    eceinit(void *);
1235 +static void    ecestart(struct ifnet *);
1236 +static void    ecestop(struct ece_softc *);
1237 +static int     eceioctl(struct ifnet * ifp, u_long, caddr_t);
1238 +
1239 +/* bus entry points */
1240 +
1241 +static int     ece_probe(device_t dev);
1242 +static int     ece_attach(device_t dev);
1243 +static int     ece_detach(device_t dev);
1244 +static void    ece_intr(void *);
1245 +static void    ece_intr_qf(void *);
1246 +static void    ece_intr_status(void *xsc);
1247 +
1248 +/* helper routines */
1249 +static int     ece_activate(device_t dev);
1250 +static void    ece_deactivate(device_t dev);
1251 +static int     ece_ifmedia_upd(struct ifnet *ifp);
1252 +static void    ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
1253 +static int     ece_get_mac(struct ece_softc *sc, u_char *eaddr);
1254 +static void    ece_set_mac(struct ece_softc *sc, u_char *eaddr);
1255 +static int     configure_cpu_port(struct ece_softc *sc);
1256 +static int     configure_lan_port(struct ece_softc *sc, int phy_type);
1257 +static void    set_pvid(struct ece_softc *sc, int port0, int port1, int cpu);
1258 +static void    set_vlan_vid(struct ece_softc *sc, int vlan);
1259 +static void    set_vlan_member(struct ece_softc *sc, int vlan);
1260 +static void    set_vlan_tag(struct ece_softc *sc, int vlan);
1261 +static int     hardware_init(struct ece_softc *sc);
1262 +static void    ece_intr_rx_locked(struct ece_softc *sc, int count);
1263 +
1264 +static void    ece_free_desc_dma_tx(struct ece_softc *sc);
1265 +static void    ece_free_desc_dma_rx(struct ece_softc *sc);
1266 +
1267 +static void    ece_intr_task(void *arg, int pending __unused);
1268 +static void    ece_tx_task(void *arg, int pending __unused);
1269 +static void    ece_cleanup_task(void *arg, int pending __unused);
1270 +
1271 +static int     ece_allocate_dma(struct ece_softc *sc);
1272 +
1273 +static void    ece_intr_tx(void *xsc);
1274 +
1275 +static void    clear_mac_entries(struct ece_softc *ec, int include_this_mac);
1276 +
1277 +static uint32_t read_mac_entry(struct ece_softc *ec,
1278 +           uint8_t *mac_result,
1279 +           int first);
1280 +
1281 +/*PHY related functions*/
/*
 * Read a PHY register over the MDIO interface.
 *
 * PHY_CONTROL is first written with PHY_RW_OK to clear any stale
 * completion status, then loaded with the read command; completion is
 * polled for up to 0x1000 iterations.  On success the completion bit
 * is acknowledged and the data field of the status word is returned.
 *
 * Returns 0 on timeout -- indistinguishable from a register that
 * legitimately reads as 0.
 */
static inline int
phy_read(struct ece_softc *sc, int phy, int reg)
{
	int val;
	int ii;
	int status;

	/* Writing PHY_RW_OK back acknowledges/clears the completion bit. */
	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    (PHY_ADDRESS(phy)|PHY_READ_COMMAND |
	    PHY_REGISTER(reg)));

	/* Busy-wait for the PHY to signal completion (bounded). */
	for (ii = 0; ii < 0x1000; ii++) {
		status = read_4(sc, PHY_CONTROL);
		if (status & PHY_RW_OK) {
			/* Clear the rw_ok status, and clear other
			 * bits value. */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			val = PHY_GET_DATA(status);
			return (val);
		}
	}
	return (0);
}
1306 +
/*
 * Write a value to a PHY register over the MDIO interface.
 *
 * Clears any stale completion status, issues the write command, then
 * polls up to 0x1000 iterations for completion and acknowledges it.
 * A timeout is silently ignored -- no error is reported to the caller.
 */
static inline void
phy_write(struct ece_softc *sc, int phy, int reg, int data)
{
	int ii;

	/* Writing PHY_RW_OK back acknowledges/clears the completion bit. */
	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    PHY_ADDRESS(phy) | PHY_REGISTER(reg) |
	    PHY_WRITE_COMMAND | PHY_DATA(data));
	/* Busy-wait for the PHY to signal completion (bounded). */
	for (ii = 0; ii < 0x1000; ii++) {
		if (read_4(sc, PHY_CONTROL) & PHY_RW_OK) {
			/* Clear the rw_ok status, and clear other
			 * bits value.
			 */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			return;
		}
	}
}
1326 +
1327 +static int get_phy_type(struct ece_softc *sc)
1328 +{
1329 +       uint16_t phy0_id = 0, phy1_id = 0;
1330 +
1331 +       /*
1332 +        * Use SMI (MDC/MDIO) to read Link Partner's PHY Identifier
1333 +        * Register 1.
1334 +        */
1335 +       phy0_id = phy_read(sc, 0, 0x2);
1336 +       phy1_id = phy_read(sc, 1, 0x2);
1337 +
1338 +       if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F))
1339 +               return (ASIX_GIGA_PHY);
1340 +       else if ((phy0_id == 0x0243) && (phy1_id == 0x0243))
1341 +               return (TWO_SINGLE_PHY);
1342 +       else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007))
1343 +               return (VSC8601_GIGA_PHY);
1344 +       else if ((phy0_id == 0x0243) && (phy1_id == 0xFFFF))
1345 +               return (IC_PLUS_PHY);
1346 +
1347 +       return (NOT_FOUND_PHY);
1348 +}
1349 +
1350 +static int
1351 +ece_probe(device_t dev)
1352 +{
1353 +
1354 +       device_set_desc(dev, "Econa Ethernet Controller");
1355 +       return (0);
1356 +}
1357 +
1358 +
1359 +static int
1360 +ece_attach(device_t dev)
1361 +{
1362 +       struct ece_softc *sc;
1363 +       struct ifnet *ifp = NULL;
1364 +       struct sysctl_ctx_list *sctx;
1365 +       struct sysctl_oid *soid;
1366 +       u_char eaddr[ETHER_ADDR_LEN];
1367 +       int err;
1368 +       int i, rid;
1369 +       uint32_t rnd;
1370 +
1371 +       err = 0;
1372 +
1373 +       sc = device_get_softc(dev);
1374 +
1375 +       sc->dev = dev;
1376 +
1377 +       rid = 0;
1378 +       sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1379 +                   RF_ACTIVE);
1380 +       if (sc->mem_res == NULL)
1381 +               goto out;
1382 +
1383 +       power_on_network_interface();
1384 +
1385 +       rid = 0;
1386 +       sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1387 +           RF_ACTIVE);
1388 +       if (sc->irq_res_status == NULL)
1389 +               goto out;
1390 +
1391 +       rid = 1;
1392 +       /*TSTC: Fm-Switch-Tx-Complete*/
1393 +       sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1394 +           RF_ACTIVE);
1395 +       if (sc->irq_res_tx == NULL)
1396 +               goto out;
1397 +
1398 +       rid = 2;
1399 +       /*FSRC: Fm-Switch-Rx-Complete*/
1400 +       sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1401 +           RF_ACTIVE);
1402 +       if (sc->irq_res_rec == NULL)
1403 +               goto out;
1404 +
1405 +       rid = 4;
1406 +       /*FSQF: Fm-Switch-Queue-Full*/
1407 +       sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1408 +           RF_ACTIVE);
1409 +       if (sc->irq_res_qf == NULL)
1410 +               goto out;
1411 +
1412 +       err = ece_activate(dev);
1413 +       if (err)
1414 +               goto out;
1415 +
1416 +       /* Sysctls */
1417 +       sctx = device_get_sysctl_ctx(dev);
1418 +       soid = device_get_sysctl_tree(dev);
1419 +
1420 +       ECE_LOCK_INIT(sc);
1421 +
1422 +       callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1423 +
1424 +       if ((err = ece_get_mac(sc, eaddr)) != 0) {
1425 +               /* No MAC address configured. Generate the random one. */
1426 +               if (bootverbose)
1427 +                       device_printf(dev,
1428 +                           "Generating random ethernet address.\n");
1429 +               rnd = arc4random();
1430 +
1431 +               /*from if_ae.c/if_ate.c*/
1432 +               /*
1433 +                * Set OUI to convenient locally assigned address. 'b'
1434 +                * is 0x62, which has the locally assigned bit set, and
1435 +                * the broadcast/multicast bit clear.
1436 +                */
1437 +               eaddr[0] = 'b';
1438 +               eaddr[1] = 's';
1439 +               eaddr[2] = 'd';
1440 +               eaddr[3] = (rnd >> 16) & 0xff;
1441 +               eaddr[4] = (rnd >> 8) & 0xff;
1442 +               eaddr[5] = rnd & 0xff;
1443 +
1444 +               for (i = 0; i < ETHER_ADDR_LEN; i++)
1445 +                       eaddr[i] = vlan0_mac[i];
1446 +       }
1447 +       ece_set_mac(sc, eaddr);
1448 +       sc->ifp = ifp = if_alloc(IFT_ETHER);
1449 +       if (mii_phy_probe(dev, &sc->miibus, ece_ifmedia_upd,
1450 +                   ece_ifmedia_sts)) {
1451 +               device_printf(dev, "Cannot find my PHY.\n");
1452 +               err = ENXIO;
1453 +               goto out;
1454 +       }
1455 +       ifp->if_softc = sc;
1456 +       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1457 +       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1458 +
1459 +       ifp->if_capabilities = IFCAP_HWCSUM;
1460 +
1461 +       ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
1462 +       ifp->if_capenable = ifp->if_capabilities;
1463 +       ifp->if_start = ecestart;
1464 +       ifp->if_ioctl = eceioctl;
1465 +       ifp->if_init = eceinit;
1466 +       ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS - 1;
1467 +       IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS - 1);
1468 +       IFQ_SET_READY(&ifp->if_snd);
1469 +
1470 +       /* Create local taskq. */
1471 +
1472 +       TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc);
1473 +       TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp);
1474 +       TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc);
1475 +       sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK,
1476 +           taskqueue_thread_enqueue,
1477 +           &sc->sc_tq);
1478 +       if (sc->sc_tq == NULL) {
1479 +               device_printf(sc->dev, "could not create taskqueue\n");
1480 +               goto out;
1481 +       }
1482 +
1483 +       ether_ifattach(ifp, eaddr);
1484 +
1485 +       /*
1486 +        * Activate interrupts
1487 +        */
1488 +       err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE,
1489 +           NULL, ece_intr, sc, &sc->intrhand);
1490 +       if (err) {
1491 +               ether_ifdetach(ifp);
1492 +               ECE_LOCK_DESTROY(sc);
1493 +               goto out;
1494 +       }
1495 +
1496 +       err = bus_setup_intr(dev, sc->irq_res_status,
1497 +           INTR_TYPE_NET | INTR_MPSAFE,
1498 +           NULL, ece_intr_status, sc, &sc->intrhand_status);
1499 +       if (err) {
1500 +               ether_ifdetach(ifp);
1501 +               ECE_LOCK_DESTROY(sc);
1502 +               goto out;
1503 +       }
1504 +
1505 +       err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE,
1506 +           NULL,ece_intr_qf, sc, &sc->intrhand_qf);
1507 +
1508 +       if (err) {
1509 +               ether_ifdetach(ifp);
1510 +               ECE_LOCK_DESTROY(sc);
1511 +               goto out;
1512 +       }
1513 +
1514 +       err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE,
1515 +           NULL, ece_intr_tx, sc, &sc->intrhand_tx);
1516 +
1517 +       if (err) {
1518 +               ether_ifdetach(ifp);
1519 +               ECE_LOCK_DESTROY(sc);
1520 +               goto out;
1521 +       }
1522 +
1523 +       ECE_TXLOCK_INIT(sc);
1524 +       ECE_RXLOCK_INIT(sc);
1525 +       ECE_CLEANUPLOCK_INIT(sc);
1526 +
1527 +       /* Enable all interrupt sources. */
1528 +       write_4(sc, INTERRUPT_MASK, 0x00000000);
1529 +
1530 +       /* Enable port 0. */
1531 +       write_4(sc, PORT_0_CONFIG, read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));
1532 +
1533 +       taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1534 +           device_get_nameunit(sc->dev));
1535 +
1536 +out:;
1537 +       if (err)
1538 +               ece_deactivate(dev);
1539 +       if (err && ifp)
1540 +               if_free(ifp);
1541 +       return (err);
1542 +}
1543 +
1544 +static int
1545 +ece_detach(device_t dev)
1546 +{
1547 +       struct ece_softc *sc = device_get_softc(dev);
1548 +       struct ifnet *ifp = sc->ifp;
1549 +
1550 +       ecestop(sc);
1551 +       if (ifp != NULL) {
1552 +               ether_ifdetach(ifp);
1553 +               if_free(ifp);
1554 +       }
1555 +       ece_deactivate(dev);
1556 +       return (0);
1557 +}
1558 +
1559 +static void
1560 +ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1561 +{
1562 +       u_int32_t *paddr;
1563 +       KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
1564 +       paddr = arg;
1565 +       *paddr = segs->ds_addr;
1566 +}
1567 +
1568 +static int
1569 +ece_alloc_desc_dma_tx(struct ece_softc *sc)
1570 +{
1571 +       int i;
1572 +       int error;
1573 +
1574 +       /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
1575 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1576 +           16, 0, /* alignment, boundary */
1577 +           BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1578 +           BUS_SPACE_MAXADDR,  /* highaddr */
1579 +           NULL, NULL, /* filtfunc, filtfuncarg */
1580 +           sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, /* max size */
1581 +           1, /*nsegments */
1582 +           sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,
1583 +           0, /* flags */
1584 +           NULL, NULL, /* lockfunc, lockfuncarg */
1585 +           &sc->dmatag_data_tx); /* dmat */
1586 +
1587 +       /* Allocate memory for TX ring. */
1588 +       error = bus_dmamem_alloc(sc->dmatag_data_tx,
1589 +           (void**)&(sc->desc_tx),
1590 +           BUS_DMA_NOWAIT | BUS_DMA_ZERO |
1591 +           BUS_DMA_COHERENT,
1592 +           &(sc->dmamap_ring_tx));
1593 +
1594 +       if (error) {
1595 +               if_printf(sc->ifp, "failed to allocate DMA memory\n");
1596 +               bus_dma_tag_destroy(sc->dmatag_data_tx);
1597 +               sc->dmatag_data_tx = 0;
1598 +               return (ENXIO);
1599 +       }
1600 +
1601 +       /* Load Ring DMA. */
1602 +       error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
1603 +           sc->desc_tx,
1604 +           sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,
1605 +           ece_getaddr,
1606 +           &(sc->ring_paddr_tx), BUS_DMA_NOWAIT);
1607 +
1608 +       if (error) {
1609 +               if_printf(sc->ifp, "can't load descriptor\n");
1610 +               bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
1611 +                   sc->dmamap_ring_tx);
1612 +               sc->desc_tx = NULL;
1613 +               bus_dma_tag_destroy(sc->dmatag_data_tx);
1614 +               sc->dmatag_data_tx = 0;
1615 +               return (ENXIO);
1616 +       }
1617 +
1618 +       /* Allocate a busdma tag for mbufs. */
1619 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1620 +           1, 0,                       /* alignment, boundary */
1621 +           BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1622 +           BUS_SPACE_MAXADDR,          /* highaddr */
1623 +           NULL, NULL,         /* filtfunc, filtfuncarg */
1624 +          MCLBYTES*MAX_FRAGMENT,       /* maxsize */
1625 +          MAX_FRAGMENT,                 /* nsegments */
1626 +           MCLBYTES, 0,                /* maxsegsz, flags */
1627 +           NULL, NULL,         /* lockfunc, lockfuncarg */
1628 +           &sc->dmatag_ring_tx);       /* dmat */
1629 +
1630 +       if (error) {
1631 +               if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
1632 +               return (ENXIO);
1633 +       }
1634 +
1635 +       for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
1636 +               /* Create dma map for each descriptor. */
1637 +               error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
1638 +                   &(sc->tx_desc[i].dmamap));
1639 +               if (error) {
1640 +                       if_printf(sc->ifp, "failed to create map for mbuf\n");
1641 +                       return (ENXIO);
1642 +               }
1643 +       }
1644 +       return (0);
1645 +}
1646 +
1647 +static void
1648 +ece_free_desc_dma_tx(struct ece_softc *sc)
1649 +{
1650 +       int i;
1651 +
1652 +       for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
1653 +               if (sc->tx_desc[i].buff) {
1654 +                       m_freem(sc->tx_desc[i].buff);
1655 +                       sc->tx_desc[i].buff= 0;
1656 +               }
1657 +       }
1658 +
1659 +       if (sc->dmamap_ring_tx) {
1660 +               bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx);
1661 +               if (sc->desc_tx) {
1662 +                       bus_dmamem_free(sc->dmatag_data_tx,
1663 +                           sc->desc_tx, sc->dmamap_ring_tx);
1664 +               }
1665 +               sc->dmamap_ring_tx = 0;
1666 +       }
1667 +
1668 +       if (sc->dmatag_data_tx) {
1669 +               bus_dma_tag_destroy(sc->dmatag_data_tx);
1670 +               sc->dmatag_data_tx = 0;
1671 +       }
1672 +
1673 +       if (sc->dmatag_ring_tx) {
1674 +               for (i = 0; i<ECE_MAX_TX_BUFFERS; i++) {
1675 +                       bus_dmamap_destroy(sc->dmatag_ring_tx,
1676 +                           sc->tx_desc[i].dmamap);
1677 +                       sc->tx_desc[i].dmamap = 0;
1678 +               }
1679 +               bus_dma_tag_destroy(sc->dmatag_ring_tx);
1680 +               sc->dmatag_ring_tx = 0;
1681 +       }
1682 +}
1683 +
1684 +static int
1685 +ece_alloc_desc_dma_rx(struct ece_softc *sc)
1686 +{
1687 +       int error;
1688 +       int i;
1689 +
1690 +       /* Allocate a busdma tag and DMA safe memory for RX descriptors. */
1691 +       error = bus_dma_tag_create(sc->sc_parent_tag,   /* parent */
1692 +           16, 0,                      /* alignment, boundary */
1693 +           BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1694 +           BUS_SPACE_MAXADDR,          /* highaddr */
1695 +           NULL, NULL,         /* filtfunc, filtfuncarg */
1696 +           /* maxsize, nsegments */
1697 +           sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 1,
1698 +           /* maxsegsz, flags */
1699 +           sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 0,
1700 +           NULL, NULL,         /* lockfunc, lockfuncarg */
1701 +           &sc->dmatag_data_rx);       /* dmat */
1702 +
1703 +       /* Allocate RX ring. */
1704 +       error = bus_dmamem_alloc(sc->dmatag_data_rx,
1705 +           (void**)&(sc->desc_rx),
1706 +           BUS_DMA_NOWAIT | BUS_DMA_ZERO |
1707 +           BUS_DMA_COHERENT,
1708 +           &(sc->dmamap_ring_rx));
1709 +
1710 +       if (error) {
1711 +               if_printf(sc->ifp, "failed to allocate DMA memory\n");
1712 +               return (ENXIO);
1713 +       }
1714 +
1715 +       /* Load dmamap. */
1716 +       error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
1717 +           sc->desc_rx,
1718 +           sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS,
1719 +           ece_getaddr,
1720 +           &(sc->ring_paddr_rx), BUS_DMA_NOWAIT);
1721 +
1722 +       if (error) {
1723 +               if_printf(sc->ifp, "can't load descriptor\n");
1724 +               bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
1725 +                   sc->dmamap_ring_rx);
1726 +               bus_dma_tag_destroy(sc->dmatag_data_rx);
1727 +               sc->desc_rx = NULL;
1728 +               return (ENXIO);
1729 +       }
1730 +
1731 +       /* Allocate a busdma tag for mbufs. */
1732 +       error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
1733 +           16, 0,                      /* alignment, boundary */
1734 +           BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1735 +           BUS_SPACE_MAXADDR,          /* highaddr */
1736 +           NULL, NULL,         /* filtfunc, filtfuncarg */
1737 +           MCLBYTES, 1,                /* maxsize, nsegments */
1738 +           MCLBYTES, 0,                /* maxsegsz, flags */
1739 +           NULL, NULL,         /* lockfunc, lockfuncarg */
1740 +           &sc->dmatag_ring_rx);       /* dmat */
1741 +
1742 +       if (error) {
1743 +               if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
1744 +               return (ENXIO);
1745 +       }
1746 +
1747 +       for (i = 0; i<ECE_MAX_RX_BUFFERS; i++) {
1748 +               error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
1749 +                   &sc->rx_desc[i].dmamap);
1750 +               if (error) {
1751 +                       if_printf(sc->ifp, "failed to create map for mbuf\n");
1752 +                       return (ENXIO);
1753 +               }
1754 +       }
1755 +
1756 +       error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap);
1757 +       if (error) {
1758 +               if_printf(sc->ifp, "failed to create spare map\n");
1759 +               return (ENXIO);
1760 +       }
1761 +
1762 +       return (0);
1763 +}
1764 +
1765 +static void
1766 +ece_free_desc_dma_rx(struct ece_softc *sc)
1767 +{
1768 +       int i;
1769 +
1770 +       for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
1771 +               if (sc->rx_desc[i].buff) {
1772 +                       m_freem(sc->rx_desc[i].buff);
1773 +                       sc->rx_desc[i].buff= 0;
1774 +               }
1775 +       }
1776 +
1777 +       if (sc->dmatag_data_rx) {
1778 +               bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx);
1779 +               bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
1780 +                   sc->dmamap_ring_rx);
1781 +               bus_dma_tag_destroy(sc->dmatag_data_rx);
1782 +               sc->dmatag_data_rx = 0;
1783 +               sc->dmamap_ring_rx = 0;
1784 +               sc->desc_rx = 0;
1785 +       }
1786 +
1787 +       if (sc->dmatag_ring_rx) {
1788 +               for (i = 0; i < ECE_MAX_RX_BUFFERS; i++)
1789 +                       bus_dmamap_destroy(sc->dmatag_ring_rx,
1790 +                           sc->rx_desc[i].dmamap);
1791 +               bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap);
1792 +               bus_dma_tag_destroy(sc->dmatag_ring_rx);
1793 +               sc->dmatag_ring_rx = 0;
1794 +       }
1795 +}
1796 +
1797 +static int
1798 +ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info* descinfo)
1799 +{
1800 +       struct mbuf *new_mbuf;
1801 +       bus_dma_segment_t seg[1];
1802 +       bus_dmamap_t map;
1803 +       int error;
1804 +       int nsegs;
1805 +       bus_dma_tag_t tag;
1806 +
1807 +       tag = sc->dmatag_ring_rx;
1808 +
1809 +       new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1810 +
1811 +       if (new_mbuf == NULL)
1812 +               return (ENOBUFS);
1813 +
1814 +       new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES;
1815 +
1816 +       error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf,
1817 +           seg, &nsegs, BUS_DMA_NOWAIT);
1818 +
1819 +       KASSERT(nsegs == 1, ("Too many segments returned!"));
1820 +
1821 +       if (nsegs != 1 || error) {
1822 +               m_free(new_mbuf);
1823 +               return (ENOBUFS);
1824 +       }
1825 +
1826 +       if (descinfo->buff != NULL) {
1827 +               bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD);
1828 +               bus_dmamap_unload(tag, descinfo->dmamap);
1829 +       }
1830 +
1831 +       map = descinfo->dmamap;
1832 +       descinfo->dmamap = sc->rx_sparemap;
1833 +       sc->rx_sparemap = map;
1834 +
1835 +       bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD);
1836 +
1837 +       descinfo->buff = new_mbuf;
1838 +       descinfo->desc->data_ptr = seg->ds_addr;
1839 +       descinfo->desc->length = seg->ds_len - 2;
1840 +
1841 +       return (0);
1842 +}
1843 +
1844 +static int
1845 +ece_allocate_dma(struct ece_softc *sc)
1846 +{
1847 +       eth_tx_desc_t *desctx;
1848 +       eth_rx_desc_t *descrx;
1849 +       int i;
1850 +       int error;
1851 +
1852 +       /* Create parent tag for tx and rx */
1853 +       error = bus_dma_tag_create(
1854 +           bus_get_dma_tag(sc->dev),/* parent */
1855 +           1, 0,               /* alignment, boundary */
1856 +           BUS_SPACE_MAXADDR,  /* lowaddr */
1857 +           BUS_SPACE_MAXADDR,  /* highaddr */
1858 +           NULL, NULL, /* filter, filterarg */
1859 +           BUS_SPACE_MAXSIZE_32BIT, 0,/* maxsize, nsegments */
1860 +           BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1861 +           0,                  /* flags */
1862 +           NULL, NULL, /* lockfunc, lockarg */
1863 +           &sc->sc_parent_tag);
1864 +
1865 +       ece_alloc_desc_dma_tx(sc);
1866 +
1867 +       for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
1868 +               desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]);
1869 +               memset(desctx, 0, sizeof(eth_tx_desc_t));
1870 +               desctx->length = MAX_PACKET_LEN;
1871 +               desctx->cown = 1;
1872 +               if (i == ECE_MAX_TX_BUFFERS - 1)
1873 +                       desctx->eor = 1;
1874 +       }
1875 +
1876 +       ece_alloc_desc_dma_rx(sc);
1877 +
1878 +       for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
1879 +               descrx = &(sc->desc_rx[i]);
1880 +               memset(descrx, 0, sizeof(eth_rx_desc_t));
1881 +               sc->rx_desc[i].desc = descrx;
1882 +               sc->rx_desc[i].buff = 0;
1883 +               ece_new_rxbuf(sc, &(sc->rx_desc[i]));
1884 +
1885 +               if (i == ECE_MAX_RX_BUFFERS - 1)
1886 +                       descrx->eor = 1;
1887 +       }
1888 +       sc->tx_prod = 0;
1889 +       sc->tx_cons = 0;
1890 +       sc->last_rx = 0;
1891 +       sc->desc_curr_tx = 0;
1892 +
1893 +       return (0);
1894 +}
1895 +
1896 +static int
1897 +ece_activate(device_t dev)
1898 +{
1899 +       struct ece_softc *sc;
1900 +       int err;
1901 +       uint32_t mac_port_config;
1902 +       struct ifnet *ifp;
1903 +
1904 +       sc = device_get_softc(dev);
1905 +       ifp = sc->ifp;
1906 +
1907 +       initial_switch_config = read_4(sc, SWITCH_CONFIG);
1908 +       initial_cpu_config = read_4(sc, CPU_PORT_CONFIG);
1909 +       initial_port0_config = read_4(sc, MAC_PORT_0_CONFIG);
1910 +       initial_port1_config = read_4(sc, MAC_PORT_1_CONFIG);
1911 +
1912 +       /* Disable Port 0 */
1913 +       mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
1914 +       mac_port_config |= (PORT_DISABLE);
1915 +       write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
1916 +
1917 +       /* Disable Port 1 */
1918 +       mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
1919 +       mac_port_config |= (PORT_DISABLE);
1920 +       write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);
1921 +
1922 +       err = ece_allocate_dma(sc);
1923 +       if (err) {
1924 +               if_printf(sc->ifp, "failed allocating dma\n");
1925 +               goto out;
1926 +       }
1927 +
1928 +       write_4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx);
1929 +       write_4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx);
1930 +
1931 +       write_4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx);
1932 +       write_4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx);
1933 +
1934 +       write_4(sc, FS_DMA_CONTROL, 1);
1935 +
1936 +       return (0);
1937 +out:
1938 +       return (ENXIO);
1939 +
1940 +}
1941 +
1942 +static void
1943 +ece_deactivate(device_t dev)
1944 +{
1945 +       struct ece_softc *sc;
1946 +
1947 +       sc = device_get_softc(dev);
1948 +
1949 +       if (sc->intrhand)
1950 +               bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand);
1951 +
1952 +       sc->intrhand = 0;
1953 +
1954 +       if (sc->intrhand_qf)
1955 +               bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf);
1956 +
1957 +       sc->intrhand_qf = 0;
1958 +
1959 +       bus_generic_detach(sc->dev);
1960 +       if (sc->miibus)
1961 +               device_delete_child(sc->dev, sc->miibus);
1962 +       if (sc->mem_res)
1963 +               bus_release_resource(dev, SYS_RES_IOPORT,
1964 +                   rman_get_rid(sc->mem_res), sc->mem_res);
1965 +       sc->mem_res = 0;
1966 +
1967 +       if (sc->irq_res_rec)
1968 +               bus_release_resource(dev, SYS_RES_IRQ,
1969 +                   rman_get_rid(sc->irq_res_rec), sc->irq_res_rec);
1970 +
1971 +       if (sc->irq_res_qf)
1972 +               bus_release_resource(dev, SYS_RES_IRQ,
1973 +                   rman_get_rid(sc->irq_res_qf), sc->irq_res_qf);
1974 +
1975 +       if (sc->irq_res_status)
1976 +               bus_release_resource(dev, SYS_RES_IRQ,
1977 +                   rman_get_rid(sc->irq_res_status), sc->irq_res_status);
1978 +
1979 +       sc->irq_res_rec = 0;
1980 +       sc->irq_res_qf = 0;
1981 +       sc->irq_res_status = 0;
1982 +       ECE_TXLOCK_DESTROY(sc);
1983 +       ECE_RXLOCK_DESTROY(sc);
1984 +
1985 +       ece_free_desc_dma_tx(sc);
1986 +       ece_free_desc_dma_rx(sc);
1987 +
1988 +       return;
1989 +}
1990 +
1991 +/*
1992 + * Change media according to request.
1993 + */
1994 +static int
1995 +ece_ifmedia_upd(struct ifnet *ifp)
1996 +{
1997 +       struct ece_softc *sc = ifp->if_softc;
1998 +       struct mii_data *mii;
1999 +       int error;
2000 +
2001 +       mii = device_get_softc(sc->miibus);
2002 +       ECE_LOCK(sc);
2003 +       error = mii_mediachg(mii);
2004 +       ECE_UNLOCK(sc);
2005 +       return (error);
2006 +}
2007 +
2008 +/*
2009 + * Notify the world which media we're using.
2010 + */
2011 +static void
2012 +ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2013 +{
2014 +       struct ece_softc *sc = ifp->if_softc;
2015 +       struct mii_data *mii;
2016 +
2017 +       mii = device_get_softc(sc->miibus);
2018 +       ECE_LOCK(sc);
2019 +       mii_pollstat(mii);
2020 +       ifmr->ifm_active = mii->mii_media_active;
2021 +       ifmr->ifm_status = mii->mii_media_status;
2022 +       ECE_UNLOCK(sc);
2023 +}
2024 +
2025 +static void
2026 +ece_tick(void *xsc)
2027 +{
2028 +       struct ece_softc *sc = xsc;
2029 +       struct mii_data *mii;
2030 +       int active;
2031 +
2032 +       mii = device_get_softc(sc->miibus);
2033 +       active = mii->mii_media_active;
2034 +       mii_tick(mii);
2035 +
2036 +       /*
2037 +        * Schedule another timeout one second from now.
2038 +        */
2039 +       callout_reset(&sc->tick_ch, hz, ece_tick, sc);
2040 +}
2041 +
2042 +static uint32_t
2043 +read_mac_entry(struct ece_softc *ec,
2044 +    uint8_t *mac_result,
2045 +    int first)
2046 +{
2047 +       uint32_t ii;
2048 +       struct arl_table_entry_t entry;
2049 +       uint32_t *entry_val;
2050 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
2051 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
2052 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
2053 +       if (first)
2054 +               write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1);
2055 +       else
2056 +               write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2);
2057 +
2058 +       for (ii = 0; ii < 0x1000; ii++)
2059 +               if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
2060 +                       break;
2061 +
2062 +       entry_val = (uint32_t*) (&entry);
2063 +       entry_val[0] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_1);
2064 +       entry_val[1] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_2);
2065 +
2066 +       if (mac_result)
2067 +               memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN);
2068 +
2069 +       return (entry.table_end);
2070 +}
2071 +
2072 +static uint32_t
2073 +write_arl_table_entry(struct ece_softc *ec,
2074 +    uint32_t filter,
2075 +    uint32_t vlan_mac,
2076 +    uint32_t vlan_gid,
2077 +    uint32_t age_field,
2078 +    uint32_t port_map,
2079 +    const uint8_t *mac_addr)
2080 +{
2081 +       uint32_t ii;
2082 +       uint32_t *entry_val;
2083 +       struct arl_table_entry_t entry;
2084 +
2085 +       memset(&entry, 0, sizeof(entry));
2086 +
2087 +       entry.filter = filter;
2088 +       entry.vlan_mac = vlan_mac;
2089 +       entry.vlan_gid = vlan_gid;
2090 +       entry.age_field = age_field;
2091 +       entry.port_map = port_map;
2092 +       memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN);
2093 +
2094 +       entry_val = (uint32_t*) (&entry);
2095 +
2096 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
2097 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
2098 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
2099 +
2100 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]);
2101 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]);
2102 +
2103 +       write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, ARL_WRITE_COMMAND);
2104 +
2105 +       for (ii = 0; ii < 0x1000; ii++)
2106 +               if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) &
2107 +                   ARL_COMMAND_COMPLETE)
2108 +                       return (1); /* Write OK. */
2109 +
2110 +       /* Write failed. */
2111 +       return (0);
2112 +}
2113 +
2114 +static void
2115 +remove_mac_entry(struct ece_softc *sc,
2116 +    uint8_t *mac)
2117 +{
2118 +
2119 +       /* An invalid age_field means: erase this entry. */
2120 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2121 +           INVALID_ENTRY, VLAN0_GROUP,
2122 +           mac);
2123 +}
2124 +
2125 +static void
2126 +add_mac_entry(struct ece_softc *sc,
2127 +    uint8_t *mac)
2128 +{
2129 +
2130 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2131 +           NEW_ENTRY, VLAN0_GROUP,
2132 +           mac);
2133 +}
2134 +
2135 +/**
2136 + * The behavior of ARL table reading and deletion is not well defined
2137 + * in the documentation. To be safe, all MAC addresses are put into a
2138 + * list, then deleted.
2139 + *
2140 + */
2141 +static void
2142 +clear_mac_entries(struct ece_softc *ec, int include_this_mac)
2143 +{
2144 +       int table_end;
2145 +       struct mac_list * temp;
2146 +       struct mac_list * mac_list_header;
2147 +       struct mac_list * current;
2148 +       char mac[ETHER_ADDR_LEN];
2149 +
2150 +       current = 0;
2151 +       mac_list_header = 0;
2152 +
2153 +       table_end = read_mac_entry(ec, mac, 1);
2154 +       while (!table_end) {
2155 +               if (!include_this_mac &&
2156 +                   memcmp(mac, vlan0_mac, ETHER_ADDR_LEN) == 0) {
2157 +                       /* Read next entry. */
2158 +                       table_end = read_mac_entry(ec, mac, 0);
2159 +                       continue;
2160 +               }
2161 +
2162 +               temp = (struct mac_list*)malloc(sizeof(struct mac_list),
2163 +                   M_DEVBUF,
2164 +                   M_NOWAIT | M_ZERO);
2165 +               memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN);
2166 +               temp->next = 0;
2167 +               if (mac_list_header) {
2168 +                       current->next = temp;
2169 +                       current = temp;
2170 +               } else {
2171 +                       mac_list_header = temp;
2172 +                       current = temp;
2173 +               }
2174 +               /* Read next Entry */
2175 +               table_end = read_mac_entry(ec, mac, 0);
2176 +       }
2177 +
2178 +       current = mac_list_header;
2179 +
2180 +       while (current) {
2181 +               remove_mac_entry(ec, current->mac_addr);
2182 +               temp = current;
2183 +               current = current->next;
2184 +               free(temp, M_DEVBUF);
2185 +       }
2186 +}
2187 +
2188 +static int
2189 +configure_lan_port(struct ece_softc *sc, int phy_type)
2190 +{
2191 +       uint32_t sw_config;
2192 +       uint32_t mac_port_config;
2193 +
2194 +       /*
2195 +        * Configure switch
2196 +        */
2197 +       sw_config = read_4(sc, SWITCH_CONFIG);
2198 +       /* Enable fast aging. */
2199 +       sw_config |= FAST_AGING;
2200 +       /* Enable IVL learning. */
2201 +       sw_config |= IVL_LEARNING;
2202 +       /* Disable hardware NAT. */
2203 +       sw_config &= ~(HARDWARE_NAT);
2204 +
2205 +       sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1| NIC_MODE;
2206 +
2207 +       write_4(sc, SWITCH_CONFIG, sw_config);
2208 +
2209 +       sw_config = read_4(sc, SWITCH_CONFIG);
2210 +
2211 +       mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
2212 +
2213 +       if (!(mac_port_config & 0x1) || (mac_port_config & 0x2))
2214 +               if_printf(sc->ifp, "Link Down\n");
2215 +       else
2216 +               write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
2217 +       return (0);
2218 +}
2219 +
2220 +static void
2221 +set_pvid(struct ece_softc *sc, int port0, int port1, int cpu)
2222 +{
2223 +       uint32_t val;
2224 +       val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 0));
2225 +       write_4(sc, VLAN_PORT_PVID, val);
2226 +       val = read_4(sc, VLAN_PORT_PVID) | ((port0) & 0x07);
2227 +       write_4(sc, VLAN_PORT_PVID, val);
2228 +       val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 4));
2229 +       write_4(sc, VLAN_PORT_PVID, val);
2230 +       val = read_4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4);
2231 +       write_4(sc, VLAN_PORT_PVID, val);
2232 +
2233 +       val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 8));
2234 +       write_4(sc, VLAN_PORT_PVID, val);
2235 +       val = read_4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8);
2236 +       write_4(sc, VLAN_PORT_PVID, val);
2237 +
2238 +}
2239 +
2240 +/* VLAN related functions */
2241 +static void
2242 +set_vlan_vid(struct ece_softc *sc, int vlan)
2243 +{
2244 +       const uint32_t regs[] = {
2245 +           VLAN_VID_0_1,
2246 +           VLAN_VID_0_1,
2247 +           VLAN_VID_2_3,
2248 +           VLAN_VID_2_3,
2249 +           VLAN_VID_4_5,
2250 +           VLAN_VID_4_5,
2251 +           VLAN_VID_6_7,
2252 +           VLAN_VID_6_7
2253 +       };
2254 +
2255 +       const int vids[] = {
2256 +           VLAN0_VID,
2257 +           VLAN1_VID,
2258 +           VLAN2_VID,
2259 +           VLAN3_VID,
2260 +           VLAN4_VID,
2261 +           VLAN5_VID,
2262 +           VLAN6_VID,
2263 +           VLAN7_VID
2264 +       };
2265 +
2266 +       uint32_t val;
2267 +       uint32_t reg;
2268 +       int vid;
2269 +
2270 +       reg = regs[vlan];
2271 +       vid = vids[vlan];
2272 +
2273 +       if (vlan & 1) {
2274 +               val = read_4(sc, reg);
2275 +               write_4(sc, reg, val & (~(0xFFF << 0)));
2276 +               val = read_4(sc, reg);
2277 +               write_4(sc, reg, val|((vid & 0xFFF) << 0));
2278 +       } else {
2279 +               val = read_4(sc, reg);
2280 +               write_4(sc, reg, val & (~(0xFFF << 12)));
2281 +               val = read_4(sc, reg);
2282 +               write_4(sc, reg, val|((vid & 0xFFF) << 12));
2283 +       }
2284 +}
2285 +
2286 +static void
2287 +set_vlan_member(struct ece_softc *sc, int vlan)
2288 +{
2289 +       unsigned char shift;
2290 +       uint32_t val;
2291 +       int group;
2292 +       const int groups[] = {
2293 +           VLAN0_GROUP,
2294 +           VLAN1_GROUP,
2295 +           VLAN2_GROUP,
2296 +           VLAN3_GROUP,
2297 +           VLAN4_GROUP,
2298 +           VLAN5_GROUP,
2299 +           VLAN6_GROUP,
2300 +           VLAN7_GROUP
2301 +       };
2302 +
2303 +       group = groups[vlan];
2304 +
2305 +       shift = vlan*3;
2306 +       val = read_4(sc, VLAN_MEMBER_PORT_MAP) & (~(0x7 << shift));
2307 +       write_4(sc, VLAN_MEMBER_PORT_MAP, val);
2308 +       val = read_4(sc, VLAN_MEMBER_PORT_MAP);
2309 +       write_4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift));
2310 +}
2311 +
2312 +static void
2313 +set_vlan_tag(struct ece_softc *sc, int vlan)
2314 +{
2315 +       unsigned char shift;
2316 +       uint32_t val;
2317 +
2318 +       int tag = 0;
2319 +
2320 +       shift = vlan*3;
2321 +       val = read_4(sc, VLAN_TAG_PORT_MAP) & (~(0x7 << shift));
2322 +       write_4(sc, VLAN_TAG_PORT_MAP, val);
2323 +       val = read_4(sc, VLAN_TAG_PORT_MAP);
2324 +       write_4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift));
2325 +}
2326 +
2327 +static int
2328 +configure_cpu_port(struct ece_softc *sc)
2329 +{
2330 +       uint32_t cpu_port_config;
2331 +       int i;
2332 +
2333 +       cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
2334 +       /* SA learning Disable */
2335 +       cpu_port_config |= (SA_LEARNING_DISABLE);
2336 +       /* set data offset + 2 */
2337 +       cpu_port_config &= ~(1 << 31);
2338 +
2339 +       write_4(sc, CPU_PORT_CONFIG, cpu_port_config);
2340 +
2341 +       if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2342 +           STATIC_ENTRY, VLAN0_GROUP,
2343 +           vlan0_mac))
2344 +               return (1);
2345 +
2346 +       set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID);
2347 +
2348 +       for (i = 0; i < 8; i++) {
2349 +               set_vlan_vid(sc, i);
2350 +               set_vlan_member(sc, i);
2351 +               set_vlan_tag(sc, i);
2352 +       }
2353 +
2354 +       /* disable all interrupt status sources */
2355 +       write_4(sc, INTERRUPT_MASK, 0xffff1fff);
2356 +
2357 +       /* clear previous interrupt sources */
2358 +       write_4(sc, INTERRUPT_STATUS, 0x00001FFF);
2359 +
2360 +       write_4(sc, TS_DMA_CONTROL, 0);
2361 +       write_4(sc, FS_DMA_CONTROL, 0);
2362 +       return (0);
2363 +}
2364 +
2365 +static int
2366 +hardware_init(struct ece_softc *sc)
2367 +{
2368 +       int status = 0;
2369 +       static int gw_phy_type;
2370 +
2371 +       gw_phy_type = get_phy_type(sc);
2372 +       /* Currently only ic_plus phy is supported. */
2373 +       if (gw_phy_type != IC_PLUS_PHY) {
2374 +               device_printf(sc->dev, "PHY type is not supported (%d)\n",
2375 +                   gw_phy_type);
2376 +               return (-1);
2377 +       }
2378 +       status = configure_lan_port(sc, gw_phy_type);
2379 +       configure_cpu_port(sc);
2380 +       return (0);
2381 +}
2382 +
2383 +static void
2384 +set_mac_address(struct ece_softc *sc, const char *mac, int mac_len)
2385 +{
2386 +
2387 +       /* An invalid age_field means: erase this entry. */
2388 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2389 +           INVALID_ENTRY, VLAN0_GROUP,
2390 +           mac);
2391 +       memcpy(vlan0_mac, mac, ETHER_ADDR_LEN);
2392 +
2393 +       write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
2394 +           STATIC_ENTRY, VLAN0_GROUP,
2395 +           mac);
2396 +}
2397 +
2398 +static void
2399 +ece_set_mac(struct ece_softc *sc, u_char *eaddr)
2400 +{
2401 +       memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN);
2402 +       set_mac_address(sc, eaddr, ETHER_ADDR_LEN);
2403 +}
2404 +
2405 +/*
2406 + * TODO: the device doesn't have MAC stored, we should read the
2407 + * configuration stored in FLASH, but the format depends on the
2408 + * bootloader used.
2409 + */
2410 +static int
2411 +ece_get_mac(struct ece_softc *sc, u_char *eaddr)
2412 +{
2413 +       return (ENXIO);
2414 +}
2415 +
2416 +static void
2417 +ece_intr_rx_locked(struct ece_softc *sc, int count)
2418 +{
2419 +       struct ifnet *ifp = sc->ifp;
2420 +       struct mbuf *mb;
2421 +       struct rx_desc_info *rxdesc;
2422 +       eth_rx_desc_t *desc;
2423 +
2424 +       int fssd_curr;
2425 +       int fssd;
2426 +       int i;
2427 +       int idx;
2428 +       int rxcount;
2429 +       uint32_t status;
2430 +
2431 +       fssd_curr = read_4(sc, FS_DESCRIPTOR_POINTER);
2432 +
2433 +       fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx)>>4;
2434 +
2435 +       desc = sc->rx_desc[sc->last_rx].desc;
2436 +
2437 +       /* Prepare to read the data in the ring. */
2438 +       bus_dmamap_sync(sc->dmatag_ring_rx,
2439 +           sc->dmamap_ring_rx,
2440 +           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2441 +
2442 +       if (fssd > sc->last_rx)
2443 +               rxcount = fssd - sc->last_rx;
2444 +       else if (fssd < sc->last_rx)
2445 +               rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd;
2446 +       else {
2447 +               if (desc->cown == 0)
2448 +                       return;
2449 +               else
2450 +                       rxcount = ECE_MAX_RX_BUFFERS;
2451 +       }
2452 +
2453 +       for (i= 0; i < rxcount; i++) {
2454 +               status = desc->cown;
2455 +               if (!status)
2456 +                       break;
2457 +
2458 +               idx = sc->last_rx;
2459 +               rxdesc = &sc->rx_desc[idx];
2460 +               mb = rxdesc->buff;
2461 +
2462 +               if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN ||
2463 +                   desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN +
2464 +                   ETHER_VLAN_ENCAP_LEN) {
2465 +                       ifp->if_ierrors++;
2466 +                       desc->cown = 0;
2467 +                       desc->length = MCLBYTES - 2;
2468 +                       /* Invalid packet, skip and process next
2469 +                        * packet.
2470 +                        */
2471 +                       continue;
2472 +               }
2473 +
2474 +               if (ece_new_rxbuf(sc, rxdesc) != 0) {
2475 +                       ifp->if_iqdrops++;
2476 +                       desc->cown = 0;
2477 +                       desc->length = MCLBYTES - 2;
2478 +                       break;
2479 +               }
2480 +
2481 +               /**
2482 +                * The device will write to address + 2, so we need to adjust
2483 +                * the address after the packet is received.
2484 +                */
2485 +               mb->m_data += 2;
2486 +               mb->m_len = mb->m_pkthdr.len = desc->length;
2487 +
2488 +               mb->m_flags |= M_PKTHDR;
2489 +               mb->m_pkthdr.rcvif = ifp;
2490 +               if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2491 +                       /* Check for a valid checksum. */
2492 +                       if ( (!desc->l4f)  && (desc->prot != 3)) {
2493 +                               mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2494 +                               mb->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2495 +                               mb->m_pkthdr.csum_data = 0xffff;
2496 +                       }
2497 +               }
2498 +               ECE_RXUNLOCK(sc);
2499 +               (*ifp->if_input)(ifp, mb);
2500 +               ECE_RXLOCK(sc);
2501 +
2502 +               desc->cown = 0;
2503 +               desc->length = MCLBYTES - 2;
2504 +
2505 +               bus_dmamap_sync(sc->dmatag_ring_rx,
2506 +                   sc->dmamap_ring_rx,
2507 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2508 +
2509 +               if (sc->last_rx == ECE_MAX_RX_BUFFERS - 1)
2510 +                       sc->last_rx = 0;
2511 +               else
2512 +                       sc->last_rx++;
2513 +
2514 +               desc = sc->rx_desc[sc->last_rx].desc;
2515 +       }
2516 +
2517 +       /* Sync updated flags. */
2518 +       bus_dmamap_sync(sc->dmatag_ring_rx,
2519 +           sc->dmamap_ring_rx,
2520 +           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2521 +
2522 +       return;
2523 +}
2524 +
2525 +static void
2526 +ece_intr_task(void *arg, int pending __unused)         /* Taskqueue handler: drain the RX ring. */
2527 +{
2528 +       struct ece_softc *sc = arg;
2529 +       ECE_RXLOCK(sc);                 /* Serialize RX ring access. */
2530 +       ece_intr_rx_locked(sc, -1);     /* -1: presumably "no packet budget" -- confirm in ece_intr_rx_locked(). */
2531 +       ECE_RXUNLOCK(sc);
2532 +}
2533 +
2534 +static void
2535 +ece_intr(void *xsc)            /* RX interrupt: defer the real work to the taskqueue. */
2536 +{
2537 +       struct ece_softc *sc = xsc;
2538 +       struct ifnet *ifp = sc->ifp;
2539 +
2540 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2541 +               write_4(sc, FS_DMA_CONTROL, 0);         /* Interface not running: stop DMA. */
2542 +               return;
2543 +       }
2544 +
2545 +       taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
2546 +
2547 +       if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2548 +               taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);  /* Output pending: kick TX as well. */
2549 +}
2550 +
2551 +static void
2552 +ece_intr_status(void *xsc)     /* Status interrupt: read, acknowledge, account errors. */
2553 +{
2554 +       struct ece_softc *sc = xsc;
2555 +       struct ifnet *ifp = sc->ifp;
2556 +       int stat;
2557 +
2558 +       stat = read_4(sc, INTERRUPT_STATUS);
2559 +
2560 +       write_4(sc, INTERRUPT_STATUS, stat);    /* NOTE(review): looks like a write-to-clear ack -- confirm with datasheet. */
2561 +
2562 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2563 +               if ((stat & ERROR_MASK) != 0)
2564 +                       ifp->if_iqdrops++;      /* Account an error status as one input drop. */
2565 +       }
2566 +}
2567 +
2568 +static void
2569 +ece_cleanup_locked(struct ece_softc *sc)       /* Reclaim TX descriptors the hardware has finished with. */
2570 +{
2571 +       eth_tx_desc_t *desc;
2572 +
2573 +       if (sc->tx_cons == sc->tx_prod) return;         /* TX ring empty: nothing to reclaim. */
2574 +
2575 +       /* Prepare to read the ring (owner bit). */
2576 +       bus_dmamap_sync(sc->dmatag_ring_tx,
2577 +           sc->dmamap_ring_tx,
2578 +           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2579 +
2580 +       while (sc->tx_cons != sc->tx_prod) {
2581 +               desc = sc->tx_desc[sc->tx_cons].desc;
2582 +               if (desc->cown != 0) {          /* cown set: CPU owns the descriptor again. */
2583 +                       struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]);
2584 +                       /* We are finished with this descriptor ... */
2585 +                       bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap,
2586 +                           BUS_DMASYNC_POSTWRITE);
2587 +                       /* ... and unload, so we can reuse. */
2588 +                       bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
2589 +                       m_freem(td->buff);      /* Release the transmitted mbuf. */
2590 +                       td->buff = 0;
2591 +                       sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS;
2592 +               } else {
2593 +                       break;          /* Oldest entry still owned by hardware: stop. */
2594 +               }
2595 +       }
2596 +
2597 +}
2598 +
2599 +static void
2600 +ece_cleanup_task(void *arg, int pending __unused)      /* Taskqueue handler: reclaim finished TX descriptors. */
2601 +{
2602 +       struct ece_softc *sc = arg;
2603 +       ECE_CLEANUPLOCK(sc);
2604 +       ece_cleanup_locked(sc);
2605 +       ECE_CLEANUPUNLOCK(sc);
2606 +}
2607 +
2608 +static void
2609 +ece_intr_tx(void *xsc)         /* TX interrupt: schedule descriptor cleanup. */
2610 +{
2611 +       struct ece_softc *sc = xsc;
2612 +       struct ifnet *ifp = sc->ifp;
2613 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2614 +               /* This should not happen, stop DMA. */
2615 +               write_4(sc, FS_DMA_CONTROL, 0);
2616 +               return;
2617 +       }
2618 +       taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task);
2619 +}
2620 +
2621 +static void
2622 +ece_intr_qf(void *xsc)         /* "qf" interrupt -- presumably queue-full; confirm against datasheet. */
2623 +{
2624 +       struct ece_softc *sc = xsc;
2625 +       struct ifnet *ifp = sc->ifp;
2626 +       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2627 +               /* This should not happen, stop DMA. */
2628 +               write_4(sc, FS_DMA_CONTROL, 0);
2629 +               return;
2630 +       }
2631 +       taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);        /* Drain RX to free ring entries. */
2632 +       write_4(sc, FS_DMA_CONTROL, 1);                         /* Re-enable DMA. */
2633 +}
2634 +
2635 +/*
2636 + * Reset and initialize the chip
2637 + */
2638 +static void
2639 +eceinit_locked(void *xsc)
2640 +{
2641 +       struct ece_softc *sc = xsc;
2642 +       struct ifnet *ifp = sc->ifp;
2643 +       struct mii_data *mii;
2644 +       uint32_t cfg_reg;
2645 +       uint32_t cpu_port_config;
2646 +       uint32_t mac_port_config;
2647 +
2648 +       while (1) {             /* NOTE(review): spins forever if the bit never sets -- no timeout. */
2649 +               cfg_reg = read_4(sc, BIST_RESULT_TEST_0);
2650 +               if ((cfg_reg & (1<<17)))        /* Wait until bit 17 of BIST_RESULT_TEST_0 is set. */
2651 +                       break;
2652 +               DELAY(100);
2653 +       }
2654 +       /* Set to default values. */
2655 +       write_4(sc, SWITCH_CONFIG, 0x007AA7A1);
2656 +       write_4(sc, MAC_PORT_0_CONFIG, 0x00423D00);
2657 +       write_4(sc, MAC_PORT_1_CONFIG, 0x00423D80);
2658 +       write_4(sc, CPU_PORT_CONFIG, 0x004C0000);
2659 +
2660 +       hardware_init(sc);
2661 +
2662 +       mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
2663 +
2664 +        /* Enable Port 0 */
2665 +       mac_port_config &= (~(PORT_DISABLE));
2666 +       write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
2667 +
2668 +       cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
2669 +       /* Enable CPU. */
2670 +       cpu_port_config &= ~(PORT_DISABLE);
2671 +       write_4(sc, CPU_PORT_CONFIG, cpu_port_config);
2672 +
2673 +       /*
2674 +        * Set 'running' flag, and clear output active flag
2675 +        * and attempt to start the output
2676 +        */
2677 +       ifp->if_drv_flags |= IFF_DRV_RUNNING;
2678 +       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2679 +
2680 +       mii = device_get_softc(sc->miibus);
2681 +       mii_pollstat(mii);      /* Refresh PHY/link status. */
2682 +       /* Enable DMA. */
2683 +       write_4(sc, FS_DMA_CONTROL, 1);
2684 +
2685 +       callout_reset(&sc->tick_ch, hz, ece_tick, sc);  /* Schedule ece_tick one second from now. */
2686 +}
2687 +
2688 +static inline int
2689 +ece_encap(struct ece_softc *sc, struct mbuf *m0)       /* DMA-map m0 and fill TX descriptors; returns 0 on success. */
2690 +{
2691 +       struct ifnet *ifp;
2692 +       bus_dma_segment_t segs[MAX_FRAGMENT];
2693 +       bus_dmamap_t mapp;
2694 +       eth_tx_desc_t *desc = 0;
2695 +       int csum_flags;
2696 +       int desc_no;
2697 +       int error;
2698 +       int nsegs;
2699 +       int seg;
2700 +
2701 +       ifp = sc->ifp;
2702 +
2703 +       /* Fetch unused map */
2704 +       mapp = sc->tx_desc[sc->tx_prod].dmamap;
2705 +
2706 +       error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp,      /* NOTE(review): data map loaded with the ring tag; dmatag_data_tx expected? */
2707 +           m0, segs, &nsegs,
2708 +           BUS_DMA_NOWAIT);
2709 +
2710 +       if (error != 0) {
2711 +               bus_dmamap_unload(sc->dmatag_ring_tx, mapp);
2712 +               return ((error != 0) ? error : -1);     /* error != 0 here, so the ternary always yields error. */
2713 +       }
2714 +
2715 +       desc = &(sc->desc_tx[sc->desc_curr_tx]);        /* Head descriptor for this packet. */
2716 +       sc->tx_desc[sc->tx_prod].desc = desc;
2717 +       sc->tx_desc[sc->tx_prod].buff = m0;
2718 +       desc_no = sc->desc_curr_tx;
2719 +
2720 +       for (seg = 0; seg < nsegs; seg++) {
2721 +               if (desc->cown == 0 ) {         /* cown clear: hardware still owns this slot. */
2722 +                       if_printf(ifp, "ERROR: descriptor is still used\n");
2723 +                       return (-1);            /* NOTE(review): leaves the dmamap loaded and ring state partially updated. */
2724 +               }
2725 +
2726 +               desc->length = segs[seg].ds_len;
2727 +               desc->data_ptr = segs[seg].ds_addr;
2728 +
2729 +               if (seg == 0) {
2730 +                       desc->fs = 1;           /* First segment of the frame. */
2731 +               } else {
2732 +                       desc->fs = 0;
2733 +               }
2734 +               if (seg == nsegs - 1) {
2735 +                       desc->ls = 1;           /* Last segment of the frame. */
2736 +               } else {
2737 +                       desc->ls = 0;
2738 +               }
2739 +
2740 +               csum_flags = m0->m_pkthdr.csum_flags;
2741 +
2742 +               desc->fr =  1;
2743 +               desc->pmap =  1;
2744 +               desc->insv =  0;
2745 +               desc->ico = 0;
2746 +               desc->tco = 0;
2747 +               desc->uco = 0;
2748 +               desc->interrupt = 1;            /* Presumably requests a per-descriptor TX interrupt -- confirm. */
2749 +
2750 +               if (csum_flags & CSUM_IP) {     /* Offload requested checksums to the hardware. */
2751 +                       desc->ico = 1;
2752 +                       if (csum_flags & CSUM_TCP)
2753 +                               desc->tco = 1;
2754 +                       if (csum_flags & CSUM_UDP)
2755 +                               desc->uco = 1;
2756 +               }
2757 +
2758 +               desc++;
2759 +               sc->desc_curr_tx = (sc->desc_curr_tx + 1) % ECE_MAX_TX_BUFFERS;
2760 +               if (sc->desc_curr_tx == 0) {    /* Wrapped: restart at the ring base. */
2761 +                       desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
2762 +               }
2763 +       }
2764 +
2765 +       desc = sc->tx_desc[sc->tx_prod].desc;   /* Back to this packet's first descriptor. */
2766 +
2767 +       sc->tx_prod = (sc->tx_prod + 1) % ECE_MAX_TX_BUFFERS;
2768 +
2769 +       /*
2770 +        * After all descriptors are set, we set the flags to start the
2771 +        * sending process.
2772 +        */
2773 +       for (seg = 0; seg < nsegs; seg++) {
2774 +               desc->cown = 0;                 /* Hand the descriptor to the hardware. */
2775 +               desc++;
2776 +               desc_no = (desc_no + 1) % ECE_MAX_TX_BUFFERS;
2777 +               if (desc_no == 0)
2778 +                       desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
2779 +       }
2780 +
2781 +       bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE);
2782 +       return (0);
2783 +}
2784 +
2785 +/*
2786 + * Dequeue packets and transmit.
2787 + */
2788 +static void
2789 +ecestart_locked(struct ifnet *ifp)
2790 +{
2791 +       struct ece_softc *sc;
2792 +       struct mbuf *m0;
2793 +       uint32_t queued = 0;            /* Packets successfully handed to ece_encap(). */
2794 +
2795 +       sc = ifp->if_softc;
2796 +       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2797 +           IFF_DRV_RUNNING)            /* Not running, or output already stalled. */
2798 +               return;
2799 +
2800 +       bus_dmamap_sync(sc->dmatag_ring_tx,
2801 +           sc->dmamap_ring_tx,
2802 +           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2803 +
2804 +       for (;;) {
2805 +               /* Get packet from the queue */
2806 +               IF_DEQUEUE(&ifp->if_snd, m0);
2807 +               if (m0 == NULL)
2808 +                       break;
2809 +               if (ece_encap(sc, m0)) {        /* Encap failed: requeue and mark output stalled. */
2810 +                       IF_PREPEND(&ifp->if_snd, m0);
2811 +                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2812 +                       break;
2813 +               }
2814 +               queued++;
2815 +               BPF_MTAP(ifp, m0);      /* Let BPF listeners see the outgoing packet. */
2816 +       }
2817 +       if (queued) {
2818 +               bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx,
2819 +                   BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2820 +               write_4(sc, TS_DMA_CONTROL, 1);         /* Start TX DMA. */
2821 +       }
2822 +}
2823 +
2824 +static void
2825 +eceinit(void *xsc)     /* Locked wrapper around eceinit_locked(). */
2826 +{
2827 +       struct ece_softc *sc = xsc;
2828 +       ECE_LOCK(sc);
2829 +       eceinit_locked(sc);
2830 +       ECE_UNLOCK(sc);
2831 +}
2832 +
2833 +static void
2834 +ece_tx_task(void *arg, int pending __unused)   /* Taskqueue handler: run the transmit path. */
2835 +{
2836 +       struct ifnet *ifp;
2837 +       ifp = (struct ifnet *)arg;
2838 +       ecestart(ifp);
2839 +}
2840 +
2841 +static void
2842 +ecestart(struct ifnet *ifp)    /* Grab the TX lock and run ecestart_locked(). */
2843 +{
2844 +       struct ece_softc *sc = ifp->if_softc;
2845 +       ECE_TXLOCK(sc);
2846 +       ecestart_locked(ifp);
2847 +       ECE_TXUNLOCK(sc);
2848 +}
2849 +
2850 +/*
2851 + * Turn off interrupts, and stop the nic.  Can be called with sc->ifp
2852 + * NULL so be careful.
2853 + */
2854 +static void
2855 +ecestop(struct ece_softc *sc)
2856 +{
2857 +       struct ifnet *ifp = sc->ifp;
2858 +       uint32_t mac_port_config;
2859 +
2860 +       write_4(sc, TS_DMA_CONTROL, 0);
2861 +       write_4(sc, FS_DMA_CONTROL,&